diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..101357cb
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,23 @@
+---
+skip_list:
+ - yaml[line-length]
+ - name[casing]
+ - yaml[comments]
+quiet: true
+exclude_paths:
+ - .cache/ # implicit unless exclude_paths is defined in config
+ - collections/
+ - roles/PyratLabs.k3s
+ - roles/gantsign.ctop
+ - roles/geerlingguy.ansible
+ - roles/geerlingguy.docker
+ - roles/geerlingguy.helm
+ - roles/geerlingguy.nfs_server
+ - roles/geerlingguy.pip
+ - roles/hifis-net.unattended_upgrades
+ - roles/l3d.gitea
+ - roles/mrlesmithjr.ansible-manage-lvm
+ - roles/oefenweb.ufw
+ - roles/pandemonium1986.ansible-role-k9s
+ - roles/robertdebock.bootstrap
+ - .gitlab-ci.yml
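
ansible-lint discovers the .ansible-lint file above automatically from the repository root, so skip_list, quiet, and exclude_paths apply to every invocation without extra flags. A minimal local run, assuming a reasonably recent ansible-lint 6.x (the bracketed rule names above require it) and that it is not already installed:

    pip install ansible-lint     # assumption: not already provided by the CI image
    ansible-lint --version       # confirm which release and rule set is in use
    ansible-lint                 # lints the repo, honouring skip_list and exclude_paths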
diff --git a/.ansible-lint-ignore b/.ansible-lint-ignore
new file mode 100644
index 00000000..2b18e9e6
--- /dev/null
+++ b/.ansible-lint-ignore
@@ -0,0 +1,3 @@
+# This file contains ignored rule violations for ansible-lint
+playbooks/on-off/remove_old_ssh_key.yml name[play]
+playbooks/on-off/remove_old_ssh_key.yml yaml[truthy]
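
Each line of .ansible-lint-ignore pairs one file path with one rule ID, so the two entries above suppress name[play] and yaml[truthy] for that single playbook only; this file is honoured from ansible-lint 6.8 onwards, which is an assumption about the toolchain here. To see what the playbook still reports:

    ansible-lint playbooks/on-off/remove_old_ssh_key.yml   # ignored matches are summarised, not counted as failures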
diff --git a/.drone.yml b/.drone.yml
index 491ad1d4..dab97558 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -19,15 +19,18 @@ type: docker
name: ansible-lint
steps:
- name: ansible-lint
- image: cytopia/ansible-lint
+ image: quay.io/ansible/creator-ee
commands:
- - /usr/bin/ansible-lint *.*
+ - ansible-lint --version
+ - echo "$ANSIBLE_VAULT_PASSWORD" > ./vault-pass.yml
+ - ansible-lint
when:
event:
exclude:
- tag
-depends_on:
- - gitleaks
+ environment:
+ ANSIBLE_VAULT_PASSWORD:
+ from_secret: vault-pass
---
kind: pipeline
@@ -42,5 +45,3 @@ steps:
event:
exclude:
- tag
-depends_on:
- - gitleaks
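
The .drone.yml change above swaps the third-party cytopia image for the Ansible-maintained creator-ee execution environment and feeds the vault password in through a Drone secret, since linting now needs to decrypt vaulted files (ansible.cfg sets vault_password_file = vault-pass.yml below). The secret must already exist on the Drone server; a hypothetical one-time setup with the Drone CLI, with <owner>/<repo> standing in for the real repository slug:

    drone secret add \
      --repository <owner>/<repo> \
      --name vault-pass \
      --data "$(cat vault-pass.yml)"   # assumption: the password is available locally in vault-pass.yml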
diff --git a/.gitignore b/.gitignore
index e70d0af2..845533c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,5 @@
.git/
vault-pass.yml
id_rsa_ansible_user
-id_rsa_ansible_user_pub
-id_rsa_ansible_user.pub
-plugins/lookup/__pycache__/**
-plugins/callback/__pycache__/
-trace/**json
id_ed25519
id_ed25519.pub
diff --git a/ansible.cfg b/ansible.cfg
index aaa758e4..ec0906a8 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -4,30 +4,9 @@ nocows = 1
retry_files_enabled = False
roles_path = ./roles
lookup_plugins = ./plugins/lookup
-collections_paths = ./ansible_collections
+collections_paths = ./collections
private_key_file = ./id_ed25519
vault_password_file = vault-pass.yml
gathering = smart
-#display_ok_hosts = no # shows only changed and failed tasks/hosts
-#display_skipped_hosts = yes # ditto
-# callback_plugins = ./plugins/callback
-# python3 -m ara.setup.callback_plugins
-# callbacks_enabled = mhansen.ansible_trace.trace # https://github.com/mhansen/ansible-trace
-[inventory]
-
-[privilege_escalation]
-
-[paramiko_connection]
-
-[ssh_connection]
-
-[persistent_connection]
-
-[accelerate]
-
-[selinux]
-
-[colors]
-
[diff]
always = true
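
A quick way to verify the trimmed ansible.cfg lost nothing meaningful is to let Ansible report the effective configuration itself; the deleted sections were all empty, so only the settings kept above should appear:

    ansible-config view                  # the raw config file Ansible actually loaded
    ansible-config dump --only-changed   # every setting that differs from the built-in defaults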
diff --git a/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
deleted file mode 100644
index 99767c6d..00000000
--- a/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
+++ /dev/null
@@ -1,491 +0,0 @@
-trigger:
- batch: true
- branches:
- include:
- - main
- - stable-*
-
-pr:
- autoCancel: true
- branches:
- include:
- - main
- - stable-*
-
-schedules:
- - cron: 0 8 * * *
- displayName: Nightly (main)
- always: true
- branches:
- include:
- - main
- - cron: 0 10 * * *
- displayName: Nightly (active stable branches)
- always: true
- branches:
- include:
- - stable-3
- - stable-4
- - cron: 0 11 * * 0
- displayName: Weekly (old stable branches)
- always: true
- branches:
- include:
- - stable-1
- - stable-2
-
-variables:
- - name: checkoutPath
- value: ansible_collections/community/general
- - name: coverageBranches
- value: main
- - name: pipelinesCoverage
- value: coverage
- - name: entryPoint
- value: tests/utils/shippable/shippable.sh
- - name: fetchDepth
- value: 0
-
-resources:
- containers:
- - container: default
- image: quay.io/ansible/azure-pipelines-test-container:1.9.0
-
-pool: Standard
-
-stages:
-### Sanity
- - stage: Sanity_devel
- displayName: Sanity devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: devel/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
- - test: extra
- - stage: Sanity_2_12
- displayName: Sanity 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: 2.12/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
- - stage: Sanity_2_11
- displayName: Sanity 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: 2.11/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
- - stage: Sanity_2_10
- displayName: Sanity 2.10
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: 2.10/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
- - stage: Sanity_2_9
- displayName: Sanity 2.9
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Test {0}
- testFormat: 2.9/sanity/{0}
- targets:
- - test: 1
- - test: 2
- - test: 3
- - test: 4
-### Units
- - stage: Units_devel
- displayName: Units devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: devel/units/{0}/1
- targets:
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.7
- - test: 3.8
- - test: 3.9
- - test: '3.10'
- - stage: Units_2_12
- displayName: Units 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.12/units/{0}/1
- targets:
- - test: 2.6
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.7
- - test: 3.8
- - test: '3.10'
- - stage: Units_2_11
- displayName: Units 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.11/units/{0}/1
- targets:
- - test: 2.6
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.7
- - test: 3.8
- - test: 3.9
- - stage: Units_2_10
- displayName: Units 2.10
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.10/units/{0}/1
- targets:
- - test: 2.7
- - test: 3.6
- - stage: Units_2_9
- displayName: Units 2.9
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.9/units/{0}/1
- targets:
- - test: 2.6
- - test: 2.7
- - test: 3.5
- - test: 3.6
- - test: 3.7
- - test: 3.8
-
-## Remote
- - stage: Remote_devel
- displayName: Remote devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: devel/{0}
- targets:
- - name: macOS 12.0
- test: macos/12.0
- - name: RHEL 7.9
- test: rhel/7.9
- - name: RHEL 8.5
- test: rhel/8.5
- - name: FreeBSD 12.3
- test: freebsd/12.3
- - name: FreeBSD 13.0
- test: freebsd/13.0
- groups:
- - 1
- - 2
- - 3
- - stage: Remote_2_12
- displayName: Remote 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.12/{0}
- targets:
- - name: macOS 11.1
- test: macos/11.1
- - name: RHEL 8.4
- test: rhel/8.4
- - name: FreeBSD 13.0
- test: freebsd/13.0
- groups:
- - 1
- - 2
- - stage: Remote_2_11
- displayName: Remote 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.11/{0}
- targets:
- - name: RHEL 7.9
- test: rhel/7.9
- - name: RHEL 8.3
- test: rhel/8.3
- - name: FreeBSD 12.2
- test: freebsd/12.2
- groups:
- - 1
- - 2
- - stage: Remote_2_10
- displayName: Remote 2.10
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.10/{0}
- targets:
- - name: OS X 10.11
- test: osx/10.11
- - name: macOS 10.15
- test: macos/10.15
- groups:
- - 1
- - 2
- - stage: Remote_2_9
- displayName: Remote 2.9
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.9/{0}
- targets:
- - name: RHEL 8.2
- test: rhel/8.2
- - name: RHEL 7.8
- test: rhel/7.8
- - name: FreeBSD 12.0
- test: freebsd/12.0
- groups:
- - 1
- - 2
-
-### Docker
- - stage: Docker_devel
- displayName: Docker devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: devel/linux/{0}
- targets:
- - name: CentOS 7
- test: centos7
- - name: Fedora 34
- test: fedora34
- - name: Fedora 35
- test: fedora35
- - name: openSUSE 15 py2
- test: opensuse15py2
- - name: openSUSE 15 py3
- test: opensuse15
- - name: Ubuntu 18.04
- test: ubuntu1804
- - name: Ubuntu 20.04
- test: ubuntu2004
- - name: Alpine 3
- test: alpine3
- groups:
- - 1
- - 2
- - 3
- - stage: Docker_2_12
- displayName: Docker 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.12/linux/{0}
- targets:
- - name: CentOS 6
- test: centos6
- - name: Fedora 34
- test: fedora34
- - name: openSUSE 15 py3
- test: opensuse15
- - name: Ubuntu 20.04
- test: ubuntu2004
- groups:
- - 1
- - 2
- - 3
- - stage: Docker_2_11
- displayName: Docker 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.11/linux/{0}
- targets:
- - name: CentOS 7
- test: centos7
- - name: Fedora 33
- test: fedora33
- - name: openSUSE 15 py2
- test: opensuse15py2
- - name: Alpine 3
- test: alpine3
- groups:
- - 2
- - 3
- - stage: Docker_2_10
- displayName: Docker 2.10
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.10/linux/{0}
- targets:
- - name: Fedora 32
- test: fedora32
- - name: Ubuntu 16.04
- test: ubuntu1604
- groups:
- - 2
- - 3
- - stage: Docker_2_9
- displayName: Docker 2.9
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: 2.9/linux/{0}
- targets:
- - name: Fedora 31
- test: fedora31
- - name: openSUSE 15 py3
- test: opensuse15
- groups:
- - 2
- - 3
-
-### Community Docker
- - stage: Docker_community_devel
- displayName: Docker (community images) devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- testFormat: devel/linux-community/{0}
- targets:
- - name: Debian Bullseye
- test: debian-bullseye/3.9
- - name: ArchLinux
- test: archlinux/3.10
- - name: CentOS Stream 8
- test: centos-stream8/3.8
- groups:
- - 1
- - 2
- - 3
-
-### Cloud
- - stage: Cloud_devel
- displayName: Cloud devel
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: devel/cloud/{0}/1
- targets:
- - test: 2.7
- - test: 3.9
- - stage: Cloud_2_12
- displayName: Cloud 2.12
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.12/cloud/{0}/1
- targets:
- - test: 3.8
- - stage: Cloud_2_11
- displayName: Cloud 2.11
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.11/cloud/{0}/1
- targets:
- - test: 3.6
- - stage: Cloud_2_10
- displayName: Cloud 2.10
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.10/cloud/{0}/1
- targets:
- - test: 3.5
- - stage: Cloud_2_9
- displayName: Cloud 2.9
- dependsOn: []
- jobs:
- - template: templates/matrix.yml
- parameters:
- nameFormat: Python {0}
- testFormat: 2.9/cloud/{0}/1
- targets:
- - test: 2.7
- - stage: Summary
- condition: succeededOrFailed()
- dependsOn:
- - Sanity_devel
- - Sanity_2_9
- - Sanity_2_10
- - Sanity_2_11
- - Sanity_2_12
- - Units_devel
- - Units_2_9
- - Units_2_10
- - Units_2_11
- - Units_2_12
- - Remote_devel
- - Remote_2_9
- - Remote_2_10
- - Remote_2_11
- - Remote_2_12
- - Docker_devel
- - Docker_2_9
- - Docker_2_10
- - Docker_2_11
- - Docker_2_12
- - Docker_community_devel
- - Cloud_devel
- - Cloud_2_9
- - Cloud_2_10
- - Cloud_2_11
- - Cloud_2_12
- jobs:
- - template: templates/coverage.yml
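
The deletions from here on remove the vendored copy of community.general from version control. With collections_paths now pointing at ./collections (see the ansible.cfg hunk above), the collection is presumably installed at setup time rather than committed; a sketch, assuming Galaxy access and no requirements.yml pinning:

    ansible-galaxy collection install community.general -p ./collections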
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
deleted file mode 100644
index 1ccfcf20..00000000
--- a/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-# Aggregate code coverage results for later processing.
-
-set -o pipefail -eu
-
-agent_temp_directory="$1"
-
-PATH="${PWD}/bin:${PATH}"
-
-mkdir "${agent_temp_directory}/coverage/"
-
-options=(--venv --venv-system-site-packages --color -v)
-
-ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
-
-if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
- # Only analyze coverage if the installed version of ansible-test supports it.
- # Doing so allows this script to work unmodified for multiple Ansible versions.
- ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
-fi
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
deleted file mode 100644
index c039f7dc..00000000
--- a/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
-
-set -o pipefail -eu
-
-PATH="${PWD}/bin:${PATH}"
-
-if ! ansible-test --help >/dev/null 2>&1; then
- # Install the devel version of ansible-test for generating code coverage reports.
- # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
- # Since a version of ansible-test is required that can work the output from multiple older releases, the devel version is used.
- pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
-fi
-
-ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
diff --git a/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml b/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
deleted file mode 100644
index 1b36ea45..00000000
--- a/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-# This template adds a job for processing code coverage data.
-# It will upload results to Azure Pipelines and codecov.io.
-# Use it from a job stage that completes after all other jobs have completed.
-# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
-
-jobs:
- - job: Coverage
- displayName: Code Coverage
- container: default
- workspace:
- clean: all
- steps:
- - checkout: self
- fetchDepth: $(fetchDepth)
- path: $(checkoutPath)
- - task: DownloadPipelineArtifact@2
- displayName: Download Coverage Data
- inputs:
- path: coverage/
- patterns: "Coverage */*=coverage.combined"
- - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
- displayName: Combine Coverage Data
- - bash: .azure-pipelines/scripts/report-coverage.sh
- displayName: Generate Coverage Report
- condition: gt(variables.coverageFileCount, 0)
- - task: PublishCodeCoverageResults@1
- inputs:
- codeCoverageTool: Cobertura
- # Azure Pipelines only accepts a single coverage data file.
- # That means only Python or PowerShell coverage can be uploaded, but not both.
- # Set the "pipelinesCoverage" variable to determine which type is uploaded.
- # Use "coverage" for Python and "coverage-powershell" for PowerShell.
- summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
- displayName: Publish to Azure Pipelines
- condition: gt(variables.coverageFileCount, 0)
- - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
- displayName: Publish to codecov.io
- condition: gt(variables.coverageFileCount, 0)
- continueOnError: true
diff --git a/ansible_collections/community/general/.github/BOTMETA.yml b/ansible_collections/community/general/.github/BOTMETA.yml
deleted file mode 100644
index 00578f5e..00000000
--- a/ansible_collections/community/general/.github/BOTMETA.yml
+++ /dev/null
@@ -1,1259 +0,0 @@
-notifications: true
-automerge: true
-files:
- plugins/:
- supershipit: quidame
- changelogs/: {}
- changelogs/fragments/:
- support: community
- $actions:
- labels: action
- $actions/system/iptables_state.py:
- maintainers: quidame
- $actions/system/shutdown.py:
- maintainers: nitzmahone samdoran aminvakil
- $becomes/:
- labels: become
- $becomes/doas.py:
- maintainers: $team_ansible_core
- $becomes/dzdo.py:
- maintainers: $team_ansible_core
- $becomes/ksu.py:
- maintainers: $team_ansible_core
- $becomes/machinectl.py:
- maintainers: $team_ansible_core
- $becomes/pbrun.py:
- maintainers: $team_ansible_core
- $becomes/pfexec.py:
- maintainers: $team_ansible_core
- $becomes/pmrun.py:
- maintainers: $team_ansible_core
- $becomes/sesu.py:
- maintainers: nekonyuu
- $becomes/sudosu.py:
- maintainers: dagwieers
- $caches/:
- labels: cache
- $caches/memcached.py: {}
- $caches/pickle.py:
- maintainers: bcoca
- $caches/redis.py: {}
- $caches/yaml.py:
- maintainers: bcoca
- $callbacks/:
- labels: callbacks
- $callbacks/cgroup_memory_recap.py: {}
- $callbacks/context_demo.py: {}
- $callbacks/counter_enabled.py: {}
- $callbacks/dense.py:
- maintainers: dagwieers
- $callbacks/diy.py:
- maintainers: theque5t
- $callbacks/elastic.py:
- maintainers: v1v
- keywords: apm observability
- $callbacks/hipchat.py: {}
- $callbacks/jabber.py: {}
- $callbacks/loganalytics.py:
- maintainers: zhcli
- $callbacks/logdna.py: {}
- $callbacks/logentries.py: {}
- $callbacks/log_plays.py: {}
- $callbacks/logstash.py:
- maintainers: ujenmr
- $callbacks/mail.py:
- maintainers: dagwieers
- $callbacks/nrdp.py:
- maintainers: rverchere
- $callbacks/null.py: {}
- $callbacks/opentelemetry.py:
- maintainers: v1v
- keywords: opentelemetry observability
- $callbacks/say.py:
- notify: chris-short
- maintainers: $team_macos
- labels: macos say
- keywords: brew cask darwin homebrew macosx macports osx
- $callbacks/selective.py: {}
- $callbacks/slack.py: {}
- $callbacks/splunk.py: {}
- $callbacks/sumologic.py:
- maintainers: ryancurrah
- labels: sumologic
- $callbacks/syslog_json.py:
- maintainers: imjoseangel
- $callbacks/unixy.py:
- maintainers: akatch
- labels: unixy
- $callbacks/yaml.py: {}
- $connections/:
- labels: connections
- $connections/chroot.py: {}
- $connections/funcd.py:
- maintainers: mscherer
- $connections/iocage.py: {}
- $connections/jail.py:
- maintainers: $team_ansible_core
- $connections/lxc.py: {}
- $connections/lxd.py:
- maintainers: mattclay
- labels: lxd
- $connections/qubes.py:
- maintainers: kushaldas
- $connections/saltstack.py:
- maintainers: mscherer
- labels: saltstack
- $connections/zone.py:
- maintainers: $team_ansible_core
- $doc_fragments/:
- labels: docs_fragments
- $doc_fragments/hpe3par.py:
- maintainers: farhan7500 gautamphegde
- labels: hpe3par
- $doc_fragments/hwc.py:
- maintainers: $team_huawei
- labels: hwc
- $doc_fragments/nomad.py:
- maintainers: chris93111
- $doc_fragments/xenserver.py:
- maintainers: bvitnik
- labels: xenserver
- $filters/counter.py:
- maintainers: keilr
- $filters/dict.py:
- maintainers: felixfontein
- $filters/dict_kv.py:
- maintainers: giner
- $filters/from_csv.py:
- maintainers: Ajpantuso
- $filters/groupby:
- maintainers: felixfontein
- $filters/hashids:
- maintainers: Ajpantuso
- $filters/jc.py:
- maintainers: kellyjonbrazil
- $filters/json_query.py: {}
- $filters/list.py:
- maintainers: vbotka
- $filters/path_join_shim.py:
- maintainers: felixfontein
- $filters/random_mac.py: {}
- $filters/time.py:
- maintainers: resmo
- $filters/unicode_normalize.py:
- maintainers: Ajpantuso
- $filters/version_sort.py:
- maintainers: ericzolf
- $inventories/:
- labels: inventories
- $inventories/cobbler.py:
- maintainers: opoplawski
- $inventories/gitlab_runners.py:
- maintainers: morph027
- $inventories/linode.py:
- maintainers: $team_linode
- labels: cloud linode
- keywords: linode dynamic inventory script
- $inventories/lxd.py:
- maintainers: conloos
- $inventories/nmap.py: {}
- $inventories/online.py:
- maintainers: remyleone
- $inventories/opennebula.py:
- maintainers: feldsam
- labels: cloud opennebula
- keywords: opennebula dynamic inventory script
- $inventories/proxmox.py:
- maintainers: $team_virt ilijamt
- $inventories/xen_orchestra.py:
- maintainers: ddelnano shinuza
- $inventories/icinga2.py:
- maintainers: BongoEADGC6
- $inventories/scaleway.py:
- maintainers: $team_scaleway
- labels: cloud scaleway
- $inventories/stackpath_compute.py:
- maintainers: shayrybak
- $inventories/virtualbox.py: {}
- $lookups/:
- labels: lookups
- $lookups/cartesian.py: {}
- $lookups/chef_databag.py: {}
- $lookups/collection_version.py:
- maintainers: felixfontein
- $lookups/consul_kv.py: {}
- $lookups/credstash.py: {}
- $lookups/cyberarkpassword.py:
- notify: cyberark-bizdev
- labels: cyberarkpassword
- $lookups/dependent.py:
- maintainers: felixfontein
- $lookups/dig.py:
- maintainers: jpmens
- labels: dig
- $lookups/dnstxt.py:
- maintainers: jpmens
- $lookups/dsv.py:
- maintainers: amigus endlesstrax
- $lookups/etcd3.py:
- maintainers: eric-belhomme
- $lookups/etcd.py:
- maintainers: jpmens
- $lookups/filetree.py:
- maintainers: dagwieers
- $lookups/flattened.py: {}
- $lookups/hiera.py:
- maintainers: jparrill
- $lookups/keyring.py: {}
- $lookups/lastpass.py: {}
- $lookups/lmdb_kv.py:
- maintainers: jpmens
- $lookups/manifold.py:
- maintainers: galanoff
- labels: manifold
- $lookups/onepass:
- maintainers: samdoran
- labels: onepassword
- $lookups/onepassword.py:
- maintainers: azenk scottsb
- $lookups/onepassword_raw.py:
- maintainers: azenk scottsb
- $lookups/passwordstore.py: {}
- $lookups/random_pet.py:
- maintainers: Akasurde
- $lookups/random_string.py:
- maintainers: Akasurde
- $lookups/random_words.py:
- maintainers: konstruktoid
- $lookups/redis.py:
- maintainers: $team_ansible_core jpmens
- $lookups/revbitspss.py:
- maintainers: RevBits
- $lookups/shelvefile.py: {}
- $lookups/tss.py:
- maintainers: amigus endlesstrax
- $module_utils/:
- labels: module_utils
- $module_utils/gitlab.py:
- notify: jlozadad
- maintainers: $team_gitlab
- labels: gitlab
- keywords: gitlab source_control
- $module_utils/hwc_utils.py:
- maintainers: $team_huawei
- labels: huawei hwc_utils networking
- keywords: cloud huawei hwc
- $module_utils/identity/keycloak/keycloak.py:
- maintainers: $team_keycloak
- $module_utils/ipa.py:
- maintainers: $team_ipa
- labels: ipa
- $module_utils/manageiq.py:
- maintainers: $team_manageiq
- labels: manageiq
- $module_utils/memset.py:
- maintainers: glitchcrab
- labels: cloud memset
- $module_utils/mh/:
- maintainers: russoz
- labels: module_helper
- $module_utils/module_helper.py:
- maintainers: russoz
- labels: module_helper
- $module_utils/oracle/oci_utils.py:
- maintainers: $team_oracle
- labels: cloud
- $module_utils/pure.py:
- maintainers: $team_purestorage
- labels: pure pure_storage
- $module_utils/redfish_utils.py:
- maintainers: $team_redfish
- labels: redfish_utils
- $module_utils/remote_management/lxca/common.py:
- maintainers: navalkp prabhosa
- $module_utils/scaleway.py:
- maintainers: $team_scaleway
- labels: cloud scaleway
- $module_utils/storage/hpe3par/hpe3par.py:
- maintainers: farhan7500 gautamphegde
- $module_utils/utm_utils.py:
- maintainers: $team_e_spirit
- labels: utm_utils
- $module_utils/xenserver.py:
- maintainers: bvitnik
- labels: xenserver
- $modules/cloud/alicloud/:
- maintainers: xiaozhu36
- $modules/cloud/atomic/atomic_container.py:
- maintainers: giuseppe krsacme
- $modules/cloud/atomic/:
- maintainers: krsacme
- $modules/cloud/centurylink/:
- maintainers: clc-runner
- $modules/cloud/dimensiondata/dimensiondata_network.py:
- maintainers: aimonb tintoy
- labels: dimensiondata_network
- $modules/cloud/dimensiondata/dimensiondata_vlan.py:
- maintainers: tintoy
- $modules/cloud/heroku/heroku_collaborator.py:
- maintainers: marns93
- $modules/cloud/huawei/:
- maintainers: $team_huawei huaweicloud
- keywords: cloud huawei hwc
- $modules/cloud/linode/:
- maintainers: $team_linode
- $modules/cloud/linode/linode.py:
- maintainers: zbal
- $modules/cloud/lxc/lxc_container.py:
- maintainers: cloudnull
- $modules/cloud/lxd/:
- ignore: hnakamur
- $modules/cloud/lxd/lxd_profile.py:
- maintainers: conloos
- $modules/cloud/memset/:
- maintainers: glitchcrab
- $modules/cloud/misc/cloud_init_data_facts.py:
- maintainers: resmo
- $modules/cloud/misc/proxmox:
- maintainers: $team_virt
- labels: proxmox virt
- keywords: kvm libvirt proxmox qemu
- $modules/cloud/misc/proxmox.py:
- maintainers: UnderGreen
- ignore: skvidal
- $modules/cloud/misc/proxmox_kvm.py:
- maintainers: helldorado
- ignore: skvidal
- $modules/cloud/misc/proxmox_nic.py:
- maintainers: Kogelvis
- $modules/cloud/misc/proxmox_tasks_info:
- maintainers: paginabianca
- $modules/cloud/misc/proxmox_template.py:
- maintainers: UnderGreen
- ignore: skvidal
- $modules/cloud/misc/rhevm.py:
- maintainers: $team_virt TimothyVandenbrande
- labels: rhevm virt
- ignore: skvidal
- keywords: kvm libvirt proxmox qemu
- $modules/cloud/misc/:
- ignore: ryansb
- $modules/cloud/misc/terraform.py:
- maintainers: m-yosefpor rainerleber
- $modules/cloud/misc/xenserver_facts.py:
- maintainers: caphrim007 cheese
- labels: xenserver_facts
- ignore: andyhky
- $modules/cloud/oneandone/:
- maintainers: aajdinov edevenport
- $modules/cloud/online/:
- maintainers: remyleone
- $modules/cloud/opennebula/:
- maintainers: $team_opennebula
- $modules/cloud/opennebula/one_host.py:
- maintainers: rvalle
- $modules/cloud/oracle/oci_vcn.py:
- maintainers: $team_oracle rohitChaware
- $modules/cloud/ovh/:
- maintainers: pascalheraud
- $modules/cloud/ovh/ovh_monthly_billing.py:
- maintainers: fraff
- $modules/cloud/packet/packet_device.py:
- maintainers: baldwinSPC t0mk teebes
- $modules/cloud/packet/:
- maintainers: nurfet-becirevic t0mk
- $modules/cloud/packet/packet_sshkey.py:
- maintainers: t0mk
- $modules/cloud/profitbricks/:
- maintainers: baldwinSPC
- $modules/cloud/pubnub/pubnub_blocks.py:
- maintainers: parfeon pubnub
- $modules/cloud/rackspace/rax.py:
- maintainers: omgjlk sivel
- $modules/cloud/rackspace/:
- ignore: ryansb sivel
- $modules/cloud/rackspace/rax_cbs.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_cbs_attachments.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_cdb.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_cdb_user.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_cdb_database.py:
- maintainers: jails
- $modules/cloud/rackspace/rax_clb.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_clb_nodes.py:
- maintainers: neuroid
- $modules/cloud/rackspace/rax_clb_ssl.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_files.py:
- maintainers: angstwad
- $modules/cloud/rackspace/rax_files_objects.py:
- maintainers: angstwad
- $modules/cloud/rackspace/rax_identity.py:
- maintainers: claco
- $modules/cloud/rackspace/rax_network.py:
- maintainers: claco omgjlk
- $modules/cloud/rackspace/rax_mon_alarm.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_check.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_entity.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_notification.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_mon_notification_plan.py:
- maintainers: smashwilson
- $modules/cloud/rackspace/rax_queue.py:
- maintainers: claco
- $modules/cloud/scaleway/:
- maintainers: $team_scaleway
- $modules/cloud/scaleway/scaleway_database_backup.py:
- maintainers: guillaume_ro_fr
- $modules/cloud/scaleway/scaleway_image_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_ip_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_organization_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_private_network.py:
- maintainers: pastral
- $modules/cloud/scaleway/scaleway_security_group.py:
- maintainers: DenBeke
- $modules/cloud/scaleway/scaleway_security_group_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_security_group_rule.py:
- maintainers: DenBeke
- $modules/cloud/scaleway/scaleway_server_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_snapshot_info.py:
- maintainers: Spredzy
- $modules/cloud/scaleway/scaleway_volume.py:
- labels: scaleway_volume
- ignore: hekonsek
- $modules/cloud/scaleway/scaleway_volume_info.py:
- maintainers: Spredzy
- $modules/cloud/smartos/:
- maintainers: $team_solaris
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/cloud/smartos/nictagadm.py:
- maintainers: SmithX10
- $modules/cloud/softlayer/sl_vm.py:
- maintainers: mcltn
- $modules/cloud/spotinst/spotinst_aws_elastigroup.py:
- maintainers: talzur
- $modules/cloud/univention/:
- maintainers: keachi
- $modules/cloud/webfaction/:
- maintainers: quentinsf
- $modules/cloud/xenserver/:
- maintainers: bvitnik
- $modules/clustering/consul/:
- maintainers: $team_consul
- ignore: colin-nolan
- $modules/clustering/etcd3.py:
- maintainers: evrardjp
- ignore: vfauth
- $modules/clustering/nomad/:
- maintainers: chris93111
- $modules/clustering/pacemaker_cluster.py:
- maintainers: matbu
- $modules/clustering/znode.py:
- maintainers: treyperry
- $modules/database/aerospike/aerospike_migrations.py:
- maintainers: Alb0t
- $modules/database/influxdb/:
- maintainers: kamsz
- $modules/database/influxdb/influxdb_query.py:
- maintainers: resmo
- $modules/database/influxdb/influxdb_user.py:
- maintainers: zhhuta
- $modules/database/influxdb/influxdb_write.py:
- maintainers: resmo
- $modules/database/misc/elasticsearch_plugin.py:
- maintainers: ThePixelDeveloper samdoran
- $modules/database/misc/kibana_plugin.py:
- maintainers: barryib
- $modules/database/misc/odbc.py:
- maintainers: john-westcott-iv
- $modules/database/misc/redis.py:
- maintainers: slok
- $modules/database/misc/redis_info.py:
- maintainers: levonet
- $modules/database/misc/redis_data_info.py:
- maintainers: paginabianca
- $modules/database/misc/redis_data.py:
- maintainers: paginabianca
- $modules/database/misc/redis_data_incr.py:
- maintainers: paginabianca
- $modules/database/misc/riak.py:
- maintainers: drewkerrigan jsmartin
- $modules/database/mssql/mssql_db.py:
- maintainers: vedit Jmainguy kenichi-ogawa-1988
- labels: mssql_db
- $modules/database/mssql/mssql_script.py:
- maintainers: kbudde
- labels: mssql_script
- $modules/database/saphana/hana_query.py:
- maintainers: rainerleber
- $modules/database/vertica/:
- maintainers: dareko
- $modules/files/archive.py:
- maintainers: bendoh
- $modules/files/filesize.py:
- maintainers: quidame
- $modules/files/ini_file.py:
- maintainers: jpmens noseka1
- $modules/files/iso_create.py:
- maintainers: Tomorrow9
- $modules/files/iso_extract.py:
- maintainers: dagwieers jhoekx ribbons
- $modules/files/read_csv.py:
- maintainers: dagwieers
- $modules/files/sapcar_extract.py:
- maintainers: RainerLeber
- $modules/files/xattr.py:
- maintainers: bcoca
- labels: xattr
- $modules/files/xml.py:
- maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
- labels: m:xml xml
- ignore: magnus919
- $modules/identity/ipa/:
- maintainers: $team_ipa
- $modules/identity/ipa/ipa_pwpolicy.py:
- maintainers: adralioh
- $modules/identity/ipa/ipa_service.py:
- maintainers: cprh
- $modules/identity/ipa/ipa_vault.py:
- maintainers: jparrill
- $modules/identity/keycloak/:
- maintainers: $team_keycloak
- $modules/identity/keycloak/keycloak_authentication.py:
- maintainers: elfelip Gaetan2907
- $modules/identity/keycloak/keycloak_clientscope.py:
- maintainers: Gaetan2907
- $modules/identity/keycloak/keycloak_client_rolemapping.py:
- maintainers: Gaetan2907
- $modules/identity/keycloak/keycloak_group.py:
- maintainers: adamgoossens
- $modules/identity/keycloak/keycloak_identity_provider.py:
- maintainers: laurpaum
- $modules/identity/keycloak/keycloak_realm_info.py:
- maintainers: fynncfchen
- $modules/identity/keycloak/keycloak_realm.py:
- maintainers: kris2kris
- $modules/identity/keycloak/keycloak_role.py:
- maintainers: laurpaum
- $modules/identity/keycloak/keycloak_user_federation.py:
- maintainers: laurpaum
- $modules/identity/onepassword_info.py:
- maintainers: Rylon
- $modules/identity/opendj/opendj_backendprop.py:
- maintainers: dj-wasabi
- $modules/monitoring/airbrake_deployment.py:
- maintainers: phumpal
- labels: airbrake_deployment
- ignore: bpennypacker
- $modules/monitoring/bigpanda.py:
- maintainers: hkariti
- $modules/monitoring/circonus_annotation.py:
- maintainers: NickatEpic
- $modules/monitoring/datadog/datadog_event.py:
- maintainers: n0ts
- labels: datadog_event
- ignore: arturaz
- $modules/monitoring/datadog/datadog_downtime.py:
- maintainers: Datadog
- $modules/monitoring/datadog/datadog_monitor.py:
- maintainers: skornehl
- $modules/monitoring/honeybadger_deployment.py:
- maintainers: stympy
- $modules/monitoring/icinga2_feature.py:
- maintainers: nerzhul
- $modules/monitoring/icinga2_host.py:
- maintainers: t794104
- $modules/monitoring/librato_annotation.py:
- maintainers: Sedward
- $modules/monitoring/logentries.py:
- labels: logentries
- ignore: ivanvanderbyl
- $modules/monitoring/logstash_plugin.py:
- maintainers: nerzhul
- $modules/monitoring/monit.py:
- maintainers: dstoflet brian-brazil snopoke
- labels: monit
- $modules/monitoring/nagios.py:
- maintainers: tbielawa tgoetheyn
- $modules/monitoring/newrelic_deployment.py:
- maintainers: mcodd
- $modules/monitoring/pagerduty.py:
- maintainers: suprememoocow thaumos
- labels: pagerduty
- ignore: bpennypacker
- $modules/monitoring/pagerduty_alert.py:
- maintainers: ApsOps
- $modules/monitoring/pagerduty_change.py:
- maintainers: adamvaughan
- $modules/monitoring/pagerduty_user.py:
- maintainers: zanssa
- $modules/monitoring/pingdom.py:
- maintainers: thaumos
- $modules/monitoring/rollbar_deployment.py:
- maintainers: kavu
- $modules/monitoring/sensu/sensu_check.py:
- maintainers: andsens
- $modules/monitoring/sensu/:
- maintainers: dmsimard
- $modules/monitoring/sensu/sensu_silence.py:
- maintainers: smbambling
- $modules/monitoring/sensu/sensu_subscription.py:
- maintainers: andsens
- $modules/monitoring/spectrum_device.py:
- maintainers: orgito
- $modules/monitoring/spectrum_model_attrs.py:
- maintainers: tgates81
- $modules/monitoring/stackdriver.py:
- maintainers: bwhaley
- $modules/monitoring/statsd.py:
- maintainers: mamercad
- $modules/monitoring/statusio_maintenance.py:
- maintainers: bhcopeland
- $modules/monitoring/uptimerobot.py:
- maintainers: nate-kingsley
- $modules/net_tools/cloudflare_dns.py:
- maintainers: mgruener
- labels: cloudflare_dns
- $modules/net_tools/dnsimple.py:
- maintainers: drcapulet
- $modules/net_tools/dnsimple_info.py:
- maintainers: edhilgendorf
- $modules/net_tools/dnsmadeeasy.py:
- maintainers: briceburg
- $modules/net_tools/gandi_livedns.py:
- maintainers: gthiemonge
- $modules/net_tools/haproxy.py:
- maintainers: ravibhure Normo
- $modules/net_tools/infinity/infinity.py:
- maintainers: MeganLiu
- $modules/net_tools/ip_netns.py:
- maintainers: bregman-arie
- $modules/net_tools/ipify_facts.py:
- maintainers: resmo
- $modules/net_tools/ipinfoio_facts.py:
- maintainers: akostyuk
- $modules/net_tools/ipwcli_dns.py:
- maintainers: cwollinger
- $modules/net_tools/ldap/ldap_attrs.py:
- maintainers: drybjed jtyr noles
- $modules/net_tools/ldap/ldap_entry.py:
- maintainers: jtyr
- $modules/net_tools/ldap/ldap_passwd.py:
- maintainers: KellerFuchs jtyr
- $modules/net_tools/ldap/ldap_search.py:
- maintainers: eryx12o45 jtyr
- $modules/net_tools/lldp.py:
- labels: lldp
- ignore: andyhky
- $modules/net_tools/netcup_dns.py:
- maintainers: nbuchwitz
- $modules/net_tools/nsupdate.py:
- maintainers: nerzhul
- $modules/net_tools/omapi_host.py:
- maintainers: amasolov nerzhul
- $modules/net_tools/pritunl/:
- maintainers: Lowess
- $modules/net_tools/nmcli.py:
- maintainers: alcamie101
- $modules/net_tools/snmp_facts.py:
- maintainers: ogenstad ujwalkomarla
- $modules/notification/bearychat.py:
- maintainers: tonyseek
- $modules/notification/campfire.py:
- maintainers: fabulops
- $modules/notification/catapult.py:
- maintainers: Jmainguy
- $modules/notification/cisco_webex.py:
- maintainers: drew-russell
- $modules/notification/discord.py:
- maintainers: cwollinger
- $modules/notification/flowdock.py:
- maintainers: mcodd
- $modules/notification/grove.py:
- maintainers: zimbatm
- $modules/notification/hipchat.py:
- maintainers: pb8226 shirou
- $modules/notification/irc.py:
- maintainers: jpmens sivel
- $modules/notification/jabber.py:
- maintainers: bcoca
- $modules/notification/logentries_msg.py:
- maintainers: jcftang
- $modules/notification/mail.py:
- maintainers: dagwieers
- $modules/notification/matrix.py:
- maintainers: jcgruenhage
- $modules/notification/mattermost.py:
- maintainers: bjolivot
- $modules/notification/mqtt.py:
- maintainers: jpmens
- $modules/notification/nexmo.py:
- maintainers: sivel
- $modules/notification/office_365_connector_card.py:
- maintainers: marc-sensenich
- $modules/notification/pushbullet.py:
- maintainers: willybarro
- $modules/notification/pushover.py:
- maintainers: weaselkeeper wopfel
- $modules/notification/rocketchat.py:
- maintainers: Deepakkothandan
- labels: rocketchat
- ignore: ramondelafuente
- $modules/notification/say.py:
- maintainers: $team_ansible_core mpdehaan
- $modules/notification/sendgrid.py:
- maintainers: makaimc
- $modules/notification/slack.py:
- maintainers: ramondelafuente
- $modules/notification/syslogger.py:
- maintainers: garbled1
- $modules/notification/telegram.py:
- maintainers: tyouxa loms lomserman
- $modules/notification/twilio.py:
- maintainers: makaimc
- $modules/notification/typetalk.py:
- maintainers: tksmd
- $modules/packaging/language/ansible_galaxy_install.py:
- maintainers: russoz
- $modules/packaging/language/bower.py:
- maintainers: mwarkentin
- $modules/packaging/language/bundler.py:
- maintainers: thoiberg
- $modules/packaging/language/cargo.py:
- maintainers: radek-sprta
- $modules/packaging/language/composer.py:
- maintainers: dmtrs
- ignore: resmo
- $modules/packaging/language/cpanm.py:
- maintainers: fcuny russoz
- $modules/packaging/language/easy_install.py:
- maintainers: mattupstate
- $modules/packaging/language/gem.py:
- maintainers: $team_ansible_core johanwiren
- labels: gem
- $modules/packaging/language/maven_artifact.py:
- maintainers: tumbl3w33d turb
- labels: maven_artifact
- ignore: chrisisbeef
- $modules/packaging/language/npm.py:
- maintainers: shane-walker xcambar
- labels: npm
- ignore: chrishoffman
- $modules/packaging/language/pear.py:
- labels: pear
- ignore: jle64
- $modules/packaging/language/pip_package_info.py:
- maintainers: bcoca matburt maxamillion
- $modules/packaging/language/pipx.py:
- maintainers: russoz
- $modules/packaging/language/yarn.py:
- maintainers: chrishoffman verkaufer
- $modules/packaging/os/apk.py:
- maintainers: tdtrask
- labels: apk
- ignore: kbrebanov
- $modules/packaging/os/apt_repo.py:
- maintainers: obirvalger
- $modules/packaging/os/apt_rpm.py:
- maintainers: evgkrsk
- $modules/packaging/os/copr.py:
- maintainers: schlupov
- $modules/packaging/os/dnf_versionlock.py:
- maintainers: moreda
- $modules/packaging/os/flatpak.py:
- maintainers: $team_flatpak
- $modules/packaging/os/flatpak_remote.py:
- maintainers: $team_flatpak
- $modules/packaging/os/pkg5:
- maintainers: $team_solaris mavit
- labels: pkg5 solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/packaging/os/homebrew.py:
- notify: chris-short
- maintainers: $team_macos andrew-d
- labels: homebrew macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/homebrew_cask.py:
- notify: chris-short
- maintainers: $team_macos enriclluelles
- labels: homebrew_ macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/homebrew_tap.py:
- notify: chris-short
- maintainers: $team_macos
- labels: homebrew_ macos
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/installp.py:
- maintainers: $team_aix kairoaraujo
- labels: aix installp
- keywords: aix efix lpar wpar
- $modules/packaging/os/layman.py:
- maintainers: jirutka
- $modules/packaging/os/macports.py:
- notify: chris-short
- maintainers: $team_macos jcftang
- labels: macos macports
- ignore: ryansb
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/packaging/os/mas.py:
- maintainers: lukasbestle mheap
- $modules/packaging/os/openbsd_pkg.py:
- maintainers: $team_bsd eest
- labels: bsd openbsd_pkg
- ignore: ryansb
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/opkg.py:
- maintainers: skinp
- $modules/packaging/os/pacman.py:
- maintainers: elasticdog indrajitr tchernomax jraby
- labels: pacman
- ignore: elasticdog
- $modules/packaging/os/pacman_key.py:
- maintainers: grawlinson
- labels: pacman
- $modules/packaging/os/pkgin.py:
- maintainers: $team_solaris L2G jasperla szinck martinm82
- labels: pkgin solaris
- $modules/packaging/os/pkgng.py:
- maintainers: $team_bsd bleader
- labels: bsd pkgng
- ignore: bleader
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/pkgutil.py:
- maintainers: $team_solaris dermute
- labels: pkgutil solaris
- $modules/packaging/os/portage.py:
- maintainers: Tatsh wltjr
- labels: portage
- ignore: sayap
- $modules/packaging/os/portinstall.py:
- maintainers: $team_bsd berenddeboer
- labels: bsd portinstall
- ignore: ryansb
- keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
- $modules/packaging/os/pulp_repo.py:
- maintainers: sysadmind
- $modules/packaging/os/redhat_subscription.py:
- maintainers: barnabycourt alikins kahowell
- labels: redhat_subscription
- $modules/packaging/os/rhn_channel.py:
- maintainers: vincentvdk alikins $team_rhn
- labels: rhn_channel
- $modules/packaging/os/rhn_register.py:
- maintainers: jlaska $team_rhn
- labels: rhn_register
- $modules/packaging/os/rhsm_release.py:
- maintainers: seandst
- $modules/packaging/os/rhsm_repository.py:
- maintainers: giovannisciortino
- $modules/packaging/os/rpm_ostree_pkg.py:
- maintainers: dustymabe Akasurde
- $modules/packaging/os/slackpkg.py:
- maintainers: KimNorgaard
- $modules/packaging/os/snap.py:
- maintainers: angristan vcarceler
- labels: snap
- $modules/packaging/os/snap_alias.py:
- maintainers: russoz
- labels: snap
- $modules/packaging/os/sorcery.py:
- maintainers: vaygr
- $modules/packaging/os/svr4pkg.py:
- maintainers: $team_solaris brontitall
- labels: solaris svr4pkg
- $modules/packaging/os/swdepot.py:
- maintainers: $team_hpux melodous
- labels: hpux swdepot
- keywords: hp-ux
- $modules/packaging/os/swupd.py:
- maintainers: hnanni albertomurillo
- labels: swupd
- $modules/packaging/os/urpmi.py:
- maintainers: pmakowski
- $modules/packaging/os/xbps.py:
- maintainers: dinoocch the-maldridge
- $modules/packaging/os/yum_versionlock.py:
- maintainers: florianpaulhoberg aminvakil
- $modules/packaging/os/zypper.py:
- maintainers: $team_suse
- labels: zypper
- ignore: dirtyharrycallahan robinro
- $modules/packaging/os/zypper_repository.py:
- maintainers: $team_suse
- labels: zypper
- ignore: matze
- $modules/remote_management/cobbler/:
- maintainers: dagwieers
- $modules/remote_management/hpilo/:
- maintainers: haad
- ignore: dagwieers
- $modules/remote_management/imc/imc_rest.py:
- maintainers: dagwieers
- labels: cisco
- $modules/remote_management/ipmi/:
- maintainers: bgaifullin cloudnull
- $modules/remote_management/lenovoxcc/:
- maintainers: panyy3 renxulei
- $modules/remote_management/lxca/:
- maintainers: navalkp prabhosa
- $modules/remote_management/manageiq/:
- labels: manageiq
- maintainers: $team_manageiq
- $modules/remote_management/manageiq/manageiq_alert_profiles.py:
- maintainers: elad661
- $modules/remote_management/manageiq/manageiq_alerts.py:
- maintainers: elad661
- $modules/remote_management/manageiq/manageiq_group.py:
- maintainers: evertmulder
- $modules/remote_management/manageiq/manageiq_tenant.py:
- maintainers: evertmulder
- $modules/remote_management/oneview/:
- maintainers: adriane-cardozo fgbulsoni tmiotto
- $modules/remote_management/oneview/oneview_datacenter_info.py:
- maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
- $modules/remote_management/oneview/oneview_fc_network.py:
- maintainers: fgbulsoni
- $modules/remote_management/oneview/oneview_fcoe_network.py:
- maintainers: fgbulsoni
- $modules/remote_management/redfish/:
- maintainers: $team_redfish
- ignore: jose-delarosa
- $modules/remote_management/stacki/stacki_host.py:
- maintainers: bsanders bbyhuy
- labels: stacki_host
- $modules/remote_management/wakeonlan.py:
- maintainers: dagwieers
- $modules/source_control/bitbucket/:
- maintainers: catcombo
- $modules/source_control/bzr.py:
- maintainers: andreparames
- $modules/source_control/git_config.py:
- maintainers: djmattyg007 mgedmin
- $modules/source_control/github/github_deploy_key.py:
- maintainers: bincyber
- $modules/source_control/github/github_issue.py:
- maintainers: Akasurde
- $modules/source_control/github/github_key.py:
- maintainers: erydo
- labels: github_key
- ignore: erydo
- $modules/source_control/github/github_release.py:
- maintainers: adrianmoisey
- $modules/source_control/github/github_repo.py:
- maintainers: atorrescogollo
- $modules/source_control/github/:
- maintainers: stpierre
- $modules/source_control/gitlab/:
- notify: jlozadad
- maintainers: $team_gitlab
- keywords: gitlab source_control
- $modules/source_control/gitlab/gitlab_project_variable.py:
- maintainers: markuman
- $modules/source_control/gitlab/gitlab_runner.py:
- maintainers: SamyCoenen
- $modules/source_control/gitlab/gitlab_user.py:
- maintainers: LennertMertens stgrace
- $modules/source_control/gitlab/gitlab_branch.py:
- maintainers: paytroff
- $modules/source_control/hg.py:
- maintainers: yeukhon
- $modules/storage/emc/emc_vnx_sg_member.py:
- maintainers: remixtj
- $modules/storage/hpe3par/ss_3par_cpg.py:
- maintainers: farhan7500 gautamphegde
- $modules/storage/ibm/:
- maintainers: tzure
- $modules/storage/pmem/pmem.py:
- maintainers: mizumm
- $modules/storage/vexata/:
- maintainers: vexata
- $modules/storage/zfs/:
- maintainers: $team_solaris
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/storage/zfs/zfs.py:
- maintainers: johanwiren
- $modules/storage/zfs/zfs_delegate_admin.py:
- maintainers: natefoo
- $modules/system/aix:
- maintainers: $team_aix
- labels: aix
- keywords: aix efix lpar wpar
- $modules/system/alternatives.py:
- maintainers: mulby
- labels: alternatives
- ignore: DavidWittman
- $modules/system/aix_lvol.py:
- maintainers: adejoux
- $modules/system/awall.py:
- maintainers: tdtrask
- $modules/system/beadm.py:
- maintainers: $team_solaris
- labels: beadm solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/system/capabilities.py:
- maintainers: natefoo
- $modules/system/cronvar.py:
- maintainers: dougluce
- $modules/system/crypttab.py:
- maintainers: groks
- $modules/system/dconf.py:
- maintainers: azaghal
- $modules/system/dpkg_divert.py:
- maintainers: quidame
- $modules/system/facter.py:
- maintainers: $team_ansible_core gamethis
- labels: facter
- $modules/system/filesystem.py:
- maintainers: pilou- abulimov quidame
- labels: filesystem
- $modules/system/gconftool2.py:
- maintainers: Akasurde kevensen
- labels: gconftool2
- $modules/system/homectl.py:
- maintainers: jameslivulpi
- $modules/system/interfaces_file.py:
- maintainers: obourdon hryamzik
- labels: interfaces_file
- $modules/system/iptables_state.py:
- maintainers: quidame
- $modules/system/shutdown.py:
- maintainers: nitzmahone samdoran aminvakil
- $modules/system/java_cert.py:
- maintainers: haad absynth76
- $modules/system/java_keystore.py:
- maintainers: Mogztter quidame
- $modules/system/kernel_blacklist.py:
- maintainers: matze
- $modules/system/launchd.py:
- maintainers: martinm82
- $modules/system/lbu.py:
- maintainers: kunkku
- $modules/system/listen_ports_facts.py:
- maintainers: ndavison
- $modules/system/locale_gen.py:
- maintainers: AugustusKling
- $modules/system/lvg.py:
- maintainers: abulimov
- $modules/system/lvol.py:
- maintainers: abulimov jhoekx zigaSRC unkaputtbar112
- $modules/system/make.py:
- maintainers: LinusU
- $modules/system/mksysb.py:
- maintainers: $team_aix
- labels: aix mksysb
- $modules/system/modprobe.py:
- maintainers: jdauphant mattjeffery
- labels: modprobe
- ignore: stygstra
- $modules/system/nosh.py:
- maintainers: tacatac
- $modules/system/ohai.py:
- maintainers: $team_ansible_core mpdehaan
- labels: ohai
- $modules/system/open_iscsi.py:
- maintainers: srvg
- $modules/system/openwrt_init.py:
- maintainers: agaffney
- $modules/system/osx_defaults.py:
- notify: chris-short
- maintainers: $team_macos notok
- labels: macos osx_defaults
- keywords: brew cask darwin homebrew macosx macports osx
- $modules/system/pam_limits.py:
- maintainers: giovannisciortino
- labels: pam_limits
- ignore: usawa
- $modules/system/pamd.py:
- maintainers: kevensen
- $modules/system/parted.py:
- maintainers: ColOfAbRiX rosowiecki jake2184
- $modules/system/pids.py:
- maintainers: saranyasridharan
- $modules/system/puppet.py:
- maintainers: nibalizer emonty
- labels: puppet
- $modules/system/python_requirements_info.py:
- maintainers: willthames
- ignore: ryansb
- $modules/system/runit.py:
- maintainers: jsumners
- $modules/system/sap_task_list_execute:
- maintainers: rainerleber
- $modules/system/sefcontext.py:
- maintainers: dagwieers
- $modules/system/selinux_permissive.py:
- maintainers: mscherer
- $modules/system/selogin.py:
- maintainers: bachradsusi dankeder jamescassell
- $modules/system/seport.py:
- maintainers: dankeder
- $modules/system/solaris_zone.py:
- maintainers: $team_solaris pmarkham
- labels: solaris
- keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
- $modules/system/ssh_config.py:
- maintainers: gaqzi Akasurde
- $modules/system/sudoers.py:
- maintainers: JonEllis
- $modules/system/svc.py:
- maintainers: bcoca
- $modules/system/syspatch.py:
- maintainers: precurse
- $modules/system/sysrc.py:
- maintainers: dlundgren
- $modules/system/sysupgrade.py:
- maintainers: precurse
- $modules/system/timezone.py:
- maintainers: indrajitr jasperla tmshn
- $modules/system/ufw.py:
- notify: felixfontein
- maintainers: ahtik ovcharenko pyykkis
- labels: ufw
- $modules/system/vdo.py:
- maintainers: rhawalsh bgurney-rh
- $modules/system/xfconf.py:
- maintainers: russoz jbenden
- labels: xfconf
- $modules/system/xfconf_info.py:
- maintainers: russoz
- labels: xfconf
- $modules/system/xfs_quota.py:
- maintainers: bushvin
- $modules/web_infrastructure/apache2_mod_proxy.py:
- maintainers: oboukili
- $modules/web_infrastructure/apache2_module.py:
- maintainers: berendt n0trax
- ignore: robinro
- $modules/web_infrastructure/deploy_helper.py:
- maintainers: ramondelafuente
- $modules/web_infrastructure/django_manage.py:
- maintainers: russoz
- ignore: scottanderson42 tastychutney
- labels: django_manage
- $modules/web_infrastructure/ejabberd_user.py:
- maintainers: privateip
- $modules/web_infrastructure/gunicorn.py:
- maintainers: agmezr
- $modules/web_infrastructure/htpasswd.py:
- maintainers: $team_ansible_core
- labels: htpasswd
- $modules/web_infrastructure/jboss.py:
- maintainers: $team_jboss jhoekx
- labels: jboss
- $modules/web_infrastructure/jenkins_build.py:
- maintainers: brettmilford unnecessary-username
- $modules/web_infrastructure/jenkins_job.py:
- maintainers: sermilrod
- $modules/web_infrastructure/jenkins_job_info.py:
- maintainers: stpierre
- $modules/web_infrastructure/jenkins_plugin.py:
- maintainers: jtyr
- $modules/web_infrastructure/jenkins_script.py:
- maintainers: hogarthj
- $modules/web_infrastructure/jira.py:
- maintainers: Slezhuk tarka pertoft
- ignore: DWSR
- labels: jira
- $modules/web_infrastructure/nginx_status_info.py:
- maintainers: resmo
- $modules/web_infrastructure/rundeck_acl_policy.py:
- maintainers: nerzhul
- $modules/web_infrastructure/rundeck_project.py:
- maintainers: nerzhul
- $modules/web_infrastructure/rundeck_job_run.py:
- maintainers: phsmith
- $modules/web_infrastructure/rundeck_job_executions_info.py:
- maintainers: phsmith
- $modules/web_infrastructure/sophos_utm/:
- maintainers: $team_e_spirit
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py:
- maintainers: $team_e_spirit stearz
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_proxy_exception.py:
- maintainers: $team_e_spirit RickS-C137
- keywords: sophos utm
- $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py:
- maintainers: stearz
- $modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py:
- maintainers: stearz
- $modules/web_infrastructure/sophos_utm/utm_network_interface_address.py:
- maintainers: steamx
- $modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py:
- maintainers: steamx
- $modules/web_infrastructure/supervisorctl.py:
- maintainers: inetfuture mattupstate
- $modules/web_infrastructure/taiga_issue.py:
- maintainers: lekum
- $tests/a_module.py:
- maintainers: felixfontein
-#########################
- tests/:
- labels: tests
- tests/unit/:
- labels: unit
- support: community
- tests/integration:
- labels: integration
- support: community
- tests/utils/:
- maintainers: gundalow
- labels: unit
-macros:
- actions: plugins/action
- becomes: plugins/become
- caches: plugins/cache
- callbacks: plugins/callback
- cliconfs: plugins/cliconf
- connections: plugins/connection
- doc_fragments: plugins/doc_fragments
- filters: plugins/filter
- inventories: plugins/inventory
- lookups: plugins/lookup
- module_utils: plugins/module_utils
- modules: plugins/modules
- terminals: plugins/terminal
- tests: plugins/test
- team_ansible_core:
- team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
- team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
- team_consul: sgargan
- team_cyberark_conjur: jvanderhoof ryanprior
- team_e_spirit: MatrixCrawler getjack
- team_flatpak: JayKayy oolongbrothers
- team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
- team_hpux: bcoca davx8342
- team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
- team_ipa: Akasurde Nosmoht fxfitz justchris1
- team_jboss: Wolfant jairojunior wbrefvem
- team_keycloak: eikef ndclt
- team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
- team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
- team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
- team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
- team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
- team_oracle: manojmeda mross22 nalsaber
- team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
- team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06
- team_rhn: FlossWare alikins barnabycourt vritant
- team_scaleway: remyleone abarbare
- team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
- team_suse: commel dcermak evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
- team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml
deleted file mode 100644
index f90bd1ad..00000000
--- a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
-blank_issues_enabled: false # default: true
-contact_links:
-- name: Security bug report
- url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: |
- Please learn how to report security vulnerabilities here.
-
- For all security related bugs, email security@ansible.com
- instead of using this issue tracker and you will receive
- a prompt response.
-
- For more information, see
- https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
-- name: Ansible Code of Conduct
- url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Be nice to other members of the community.
-- name: Talks to the community
- url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
- about: Please ask and answer usage questions here
-- name: Working groups
- url: https://github.com/ansible/community/wiki
- about: Interested in improving a specific area? Become a part of a working group!
-- name: For Enterprise
- url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
- about: Red Hat offers support for the Ansible Automation Platform
diff --git a/ansible_collections/community/general/.github/dependabot.yml b/ansible_collections/community/general/.github/dependabot.yml
deleted file mode 100644
index 1cd41305..00000000
--- a/ansible_collections/community/general/.github/dependabot.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-version: 2
-updates:
- - package-ecosystem: "github-actions"
- directory: "/"
-    schedule:
-      interval: "weekly"
diff --git a/ansible_collections/community/general/.github/patchback.yml b/ansible_collections/community/general/.github/patchback.yml
deleted file mode 100644
index 33ad6e84..00000000
--- a/ansible_collections/community/general/.github/patchback.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-backport_branch_prefix: patchback/backports/
-backport_label_prefix: backport-
-target_branch_prefix: stable-
-...
diff --git a/ansible_collections/community/general/.github/settings.yml b/ansible_collections/community/general/.github/settings.yml
deleted file mode 100644
index 8a5b8d32..00000000
--- a/ansible_collections/community/general/.github/settings.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# DO NOT MODIFY
-
-# Settings: https://probot.github.io/apps/settings/
-# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml
-
-_extends: ".github"
diff --git a/ansible_collections/community/general/.github/workflows/codeql-analysis.yml b/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
deleted file mode 100644
index 81884ac4..00000000
--- a/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: "Code scanning - action"
-
-on:
- schedule:
- - cron: '26 19 * * 1'
-
-jobs:
- CodeQL-Build:
-
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v2
- with:
- # We must fetch at least the immediate parents so that if this is
- # a pull request then we can checkout the head.
- fetch-depth: 2
-
- # If this run was triggered by a pull request event, then checkout
- # the head of the pull request instead of the merge commit.
- - run: git checkout HEAD^2
- if: ${{ github.event_name == 'pull_request' }}
-
- # Initializes the CodeQL tools for scanning.
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v1
- # Override language selection by uncommenting this and choosing your languages
- # with:
- # languages: go, javascript, csharp, python, cpp, java
-
- # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
- # If this step fails, then you should remove it and run the build manually (see below)
- - name: Autobuild
- uses: github/codeql-action/autobuild@v1
-
- # ℹ️ Command-line programs to run using the OS shell.
- # 📚 https://git.io/JvXDl
-
- # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
- # and modify them (or add more) to build your code if your project
- # uses a compiled language
-
- #- run: |
- # make bootstrap
- # make release
-
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v1
diff --git a/ansible_collections/community/general/.gitignore b/ansible_collections/community/general/.gitignore
deleted file mode 100644
index c6c78b42..00000000
--- a/ansible_collections/community/general/.gitignore
+++ /dev/null
@@ -1,446 +0,0 @@
-
-# Created by https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-# Edit at https://www.toptal.com/developers/gitignore?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-
-### dotenv ###
-.env
-
-### Emacs ###
-# -*- mode: gitignore; -*-
-*~
-\#*\#
-/.emacs.desktop
-/.emacs.desktop.lock
-*.elc
-auto-save-list
-tramp
-.\#*
-
-# Org-mode
-.org-id-locations
-*_archive
-
-# flymake-mode
-*_flymake.*
-
-# eshell files
-/eshell/history
-/eshell/lastdir
-
-# elpa packages
-/elpa/
-
-# reftex files
-*.rel
-
-# AUCTeX auto folder
-/auto/
-
-# cask packages
-.cask/
-dist/
-
-# Flycheck
-flycheck_*.el
-
-# server auth directory
-/server/
-
-# projectiles files
-.projectile
-
-# directory configuration
-.dir-locals.el
-
-# network security
-/network-security.data
-
-
-### Git ###
-# Created by git for backups. To disable backups in Git:
-# $ git config --global mergetool.keepBackup false
-*.orig
-
-# Created by git when using merge tools for conflicts
-*.BACKUP.*
-*.BASE.*
-*.LOCAL.*
-*.REMOTE.*
-*_BACKUP_*.txt
-*_BASE_*.txt
-*_LOCAL_*.txt
-*_REMOTE_*.txt
-
-#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!#
-
-### Linux ###
-
-# temporary files which can be created if a process still has a handle open of a deleted file
-.fuse_hidden*
-
-# KDE directory preferences
-.directory
-
-# Linux trash folder which might appear on any partition or disk
-.Trash-*
-
-# .nfs files are created when an open file is removed but is still being accessed
-.nfs*
-
-### PyCharm+all ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# AWS User-specific
-.idea/**/aws.xml
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### PyCharm+all Patch ###
-# Ignores the whole .idea folder and all .iml files
-# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
-
-.idea/
-
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
-*.iml
-modules.xml
-.idea/misc.xml
-*.ipr
-
-# Sonarlint plugin
-.idea/sonarlint
-
-### pydev ###
-.pydevproject
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-.python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-### Vim ###
-# Swap
-[._]*.s[a-v][a-z]
-!*.svg # comment out if you don't need vector files
-[._]*.sw[a-p]
-[._]s[a-rt-v][a-z]
-[._]ss[a-gi-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-Sessionx.vim
-
-# Temporary
-.netrwhist
-# Auto-generated tag files
-tags
-# Persistent undo
-[._]*.un~
-
-### WebStorm ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# AWS User-specific
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### WebStorm Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-# https://plugins.jetbrains.com/plugin/7973-sonarlint
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator-enh.xml
-.idea/**/markdown-navigator/
-
-# Cache file creation bug
-# See https://youtrack.jetbrains.com/issue/JBR-2257
-.idea/$CACHE_FILE$
-
-# CodeStream plugin
-# https://plugins.jetbrains.com/plugin/12206-codestream
-.idea/codestream.xml
-
-### Windows ###
-# Windows thumbnail cache files
-Thumbs.db
-Thumbs.db:encryptable
-ehthumbs.db
-ehthumbs_vista.db
-
-# Dump file
-*.stackdump
-
-# Folder config file
-[Dd]esktop.ini
-
-# Recycle Bin used on file shares
-$RECYCLE.BIN/
-
-# Windows Installer files
-*.cab
-*.msi
-*.msix
-*.msm
-*.msp
-
-# Windows shortcuts
-*.lnk
-
-# End of https://www.toptal.com/developers/gitignore/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
diff --git a/ansible_collections/community/general/CHANGELOG.rst b/ansible_collections/community/general/CHANGELOG.rst
deleted file mode 100644
index 78a59b7f..00000000
--- a/ansible_collections/community/general/CHANGELOG.rst
+++ /dev/null
@@ -1,929 +0,0 @@
-===============================
-Community General Release Notes
-===============================
-
-.. contents:: Topics
-
-This changelog describes changes after version 3.0.0.
-
-v4.6.1
-======
-
-Release Summary
----------------
-
-Extraordinary bugfix release to fix a breaking change in ``terraform``.
-
-Bugfixes
---------
-
-- lxd inventory plugin - do not crash if OS and release metadata are not present
- (https://github.com/ansible-collections/community.general/pull/4351).
-- terraform - revert bugfix https://github.com/ansible-collections/community.general/pull/4281 that tried to fix ``variable`` handling to allow complex values. It turned out that this was breaking several valid use-cases (https://github.com/ansible-collections/community.general/issues/4367, https://github.com/ansible-collections/community.general/pull/4370).
-
-v4.6.0
-======
-
-Release Summary
----------------
-
-Regular feature and bugfix release.
-
-Minor Changes
--------------
-
-- jira - when creating a comment, ``fields`` now is used for additional data (https://github.com/ansible-collections/community.general/pull/4304).
-- ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
-- mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
-- nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless`` (https://github.com/ansible-collections/community.general/pull/4108).
-- nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
-- npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
-- pacman - add ``remove_nosave`` parameter to avoid saving modified configuration files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316, https://github.com/ansible-collections/community.general/issues/4315).
-- pacman - now implements proper change detection for ``update_cache=true``. Adds a ``cache_updated`` return value when ``update_cache=true`` to report this result independently of the module's overall ``changed`` return value (https://github.com/ansible-collections/community.general/pull/4337).
-- pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
-- proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
-- redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``, and ``ca_certs`` (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/4207).
-- syslog_json - add option to skip logging of ``gather_facts`` playbook tasks; use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
-- zypper - add support for ``--clean-deps`` option to remove packages that depend on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
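
For the ``redis`` entry above, a minimal sketch of a task using the new authentication parameters; the host, user, and certificate path are illustrative placeholders, not values from this repository:

```yaml
- name: Flush a Redis database over an authenticated TLS connection
  community.general.redis:
    command: flush
    login_host: redis.example.com          # placeholder
    login_user: ansible                    # new in 4.6.0
    login_password: "{{ redis_password }}"
    tls: true                              # new in 4.6.0
    validate_certs: true                   # new in 4.6.0
    ca_certs: /etc/ssl/certs/ca.pem        # new in 4.6.0; placeholder path
```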
-
-Deprecated Features
--------------------
-
-- pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache`` will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep the old behavior, add something like ``register: result`` and ``changed_when: result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).
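
A minimal sketch of the workaround suggested above, with a placeholder package name:

```yaml
- name: Install a package while refreshing the package cache
  community.general.pacman:
    name: htop
    update_cache: true
  register: result
  # Report "changed" only when packages were actually installed or upgraded,
  # which preserves the pre-5.0.0 behaviour described above.
  changed_when: result.packages | length > 0
```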
-
-Bugfixes
---------
-
-- filesize - add support for busybox dd implementation, that is used by default on Alpine linux (https://github.com/ansible-collections/community.general/pull/4288, https://github.com/ansible-collections/community.general/issues/4259).
-- linode inventory plugin - fix configuration handling relating to inventory filtering (https://github.com/ansible-collections/community.general/pull/4336).
-- mksysb - fixed a bug where the parameter ``backup_dmapi_fs`` was passing the wrong CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
-- pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
-- pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286, https://github.com/ansible-collections/community.general/issues/4285).
-- pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275, https://github.com/ansible-collections/community.general/issues/4274).
-- pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade`` is specified, even if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
-- pacman - when the ``update_cache`` option is combined with another option such as ``upgrade``, report ``changed`` based on the actions performed by the latter option. This was the behavior in community.general 4.4.0 and before. In community.general 4.5.0, a task combining these options would always report ``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
-- proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]`` form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
-- proxmox inventory plugin - fixed the ``description`` field being ignored if it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
-- proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
-- proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
-- terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281).
-
-Known Issues
-------------
-
-- pacman - ``update_cache`` cannot differentiate between up to date and outdated package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
-- pacman - binaries specified in the ``executable`` parameter must support ``--print-format`` in order to be used by this module. In particular, AUR helper ``yay`` is known not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
-
-v4.5.0
-======
-
-Release Summary
----------------
-
-Regular feature and bugfix release.
-
-Minor Changes
--------------
-
-- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9. This fixes some instances added since the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232).
-- ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174).
-- gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038 and https://github.com/ansible-collections/community.general/issues/4074).
-- keycloak_* modules - added connection timeout parameter when calling server (https://github.com/ansible-collections/community.general/pull/4168).
-- linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179).
-- opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner`` or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105).
-- pacman - the module has been rewritten and is now much faster when using ``state=latest``. Operations are now done on all packages at once instead of package by package, and the configured output format of ``pacman`` no longer affects the module's operation. (https://github.com/ansible-collections/community.general/pull/3907, https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079)
-- passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout`` options to avoid race conditions in the plugin itself and in the ``pass`` utility it calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194).
-- proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029).
-- proxmox_kvm - added EFI disk support when creating a VM with OVMF UEFI BIOS via the new ``efidisk0`` option (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/4106, https://github.com/ansible-collections/community.general/issues/1638).
-- proxmox_kvm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023, https://github.com/ansible-collections/community.general/pull/4191).
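
For the ``efidisk0`` entry above, a hedged sketch of creating an OVMF-based VM; the API endpoint, credentials, node, and storage names are placeholders, and the sub-option layout follows the module documentation:

```yaml
- name: Create a UEFI VM with a dedicated EFI disk
  community.general.proxmox_kvm:
    api_host: pve.example.com
    api_user: root@pam
    api_password: "{{ proxmox_api_password }}"
    node: pve01
    name: uefi-vm
    bios: ovmf                  # OVMF UEFI firmware
    efidisk0:                   # new in 4.5.0
      storage: local-lvm
      format: raw
      efitype: 4m
      pre_enrolled_keys: false
```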
-
-Bugfixes
---------
-
-- dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151).
-- gitlab_group_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_group_variable - allow setting the same variable name under different environment scopes. Due to this change, the return value ``group_variable`` differs from previous versions in check mode. It was counting ``updated`` values because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_group_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/pull/4038).
-- gitlab_project_variable - ``value`` is not necessary when deleting variables (https://github.com/ansible-collections/community.general/pull/4150).
-- gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136).
-- homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703).
-- imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest`` which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206).
-- ini_file - when removing nothing do not report changed (https://github.com/ansible-collections/community.general/issues/4154).
-- keycloak_user_federation - creating a user federation while specifying an ID (that does not exist yet) no longer fails with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212).
-- keycloak_user_federation - mappers auto-created by keycloak are matched and merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212).
-- mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060, https://github.com/ansible-collections/community.general/pull/4061).
-- passwordstore lookup plugin - fix error detection for non-English locales (https://github.com/ansible-collections/community.general/pull/4219).
-- passwordstore lookup plugin - prevent returning path names as passwords by accident (https://github.com/ansible-collections/community.general/issues/4185, https://github.com/ansible-collections/community.general/pull/4192).
-- vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163).
-- yum_versionlock - fix matching of existing entries with names passed to the module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183).
-
-New Modules
------------
-
-Cloud
-~~~~~
-
-scaleway
-^^^^^^^^
-
-- scaleway_private_network - Scaleway private network management
-
-Storage
-~~~~~~~
-
-pmem
-^^^^
-
-- pmem - Configure Intel Optane Persistent Memory modules
-
-v4.4.0
-======
-
-Release Summary
----------------
-
-Regular features and bugfixes release.
-
-Minor Changes
--------------
-
-- cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068).
-- gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038).
-- icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088).
-- linode inventory plugin - allow templating of ``access_token`` variable in Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040).
-- lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``. These are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058).
-- lxc_container - added ``wait_for_container`` parameter. If ``true`` the module will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039).
-- mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055, https://github.com/ansible-collections/community.general/pull/4056).
-- mail callback plugin - properly use Ansible's option handling to split lists (https://github.com/ansible-collections/community.general/pull/4140).
-- nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6 routes (see the sketch after this list) (https://github.com/ansible-collections/community.general/issues/4059).
-- opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036).
-- opentelemetry_plugin - enrich service when using the ``docker_login`` module (https://github.com/ansible-collections/community.general/pull/4104).
-- proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030).
-- scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049).
-- snap - add option ``options``, permitting options to be set using the ``snap set`` command (https://github.com/ansible-collections/community.general/pull/3943).
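
For the ``routes6``/``route_metric6`` entry above, a minimal sketch with documentation-prefix addresses; the ``<network> <next hop>`` string format is assumed to mirror the existing ``routes4`` syntax:

```yaml
- name: Ethernet connection with a static IPv6 route
  community.general.nmcli:
    conn_name: eth0
    type: ethernet
    ip6: 2001:db8::10/64
    gw6: 2001:db8::1
    routes6:
      - "2001:db8:1::/64 2001:db8::254"   # <network> <next hop>
    route_metric6: 100                     # new in 4.4.0
    state: present
```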
-
-Deprecated Features
--------------------
-
-- mail callback plugin - not specifying ``sender`` is deprecated and will be disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140).
-
-Bugfixes
---------
-
-- cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052).
-- cargo - fix incorrectly reported changed status for packages with a name containing a hyphen (https://github.com/ansible-collections/community.general/issues/4044, https://github.com/ansible-collections/community.general/pull/4052).
-- gitlab_project_variable - add missing documentation about GitLab versions that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_project_variable - allow setting the same variable name under different environment scopes. Due to this change, the return value ``project_variable`` differs from previous versions in check mode. It was counting ``updated`` values because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_project_variable - fix idempotent change behaviour for float and integer variables (https://github.com/ansible-collections/community.general/issues/4038).
-- gitlab_runner - use correct API endpoint to create and retrieve project level runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965).
-- listen_ports_facts - the local port regex was not handling IPv6-only bindings well. Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092).
-- mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025, https://github.com/ansible-collections/community.general/pull/4026).
-- opentelemetry - fix generating a trace with a task containing ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/4043).
-- python_requirements_info - store ``mismatched`` return values per package as documented in the module (https://github.com/ansible-collections/community.general/pull/4078).
-- yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050).
-- yarn - fix incorrectly reported status when installing a package globally (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050).
-- yarn - fix missing ``~`` expansion in yarn global install folder which resulted in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4048).
-
-New Modules
------------
-
-System
-~~~~~~
-
-- homectl - Manage user accounts with systemd-homed
-
-v4.3.0
-======
-
-Release Summary
----------------
-
-Regular feature and bugfix release.
-
-Minor Changes
--------------
-
-- ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string parameter accepting ``"true"`` and ``"false"``. The module is also now idempotent with respect to ``dynamicupdate`` (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/3374).
-- ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
-- ipmi_power - add ``machine`` option to ensure the power state via the remote target address (https://github.com/ansible-collections/community.general/pull/3968).
-- mattermost - add the possibility to send attachments instead of text messages (https://github.com/ansible-collections/community.general/pull/3946).
-- nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
-- proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
-- puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff`` is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
-- scaleway_compute - add possibility to use project identifier (new ``project`` option) instead of deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
-- scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
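
For the ``ipa_dnszone`` entry above, a brief sketch with ``dynamicupdate`` as a boolean; the server and credentials are placeholders:

```yaml
- name: Ensure a DNS zone exists with dynamic updates enabled
  community.general.ipa_dnszone:
    zone_name: example.com
    dynamicupdate: true        # boolean since 4.3.0 (previously the strings "true"/"false")
    state: present
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: "{{ ipa_password }}"
```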
-
-Bugfixes
---------
-
-- Various modules and plugins - use vendored version of ``distutils.version`` instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
-- alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
-- jail connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
-- lxd connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
-- passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool`` with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``, ``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
-- say callback plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables (https://github.com/ansible-collections/community.general/pull/3934).
-- scaleway_user_data - fix double-quote added where no double-quote is needed to user data in scaleway's server (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
-- slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
-- zone connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
-
-New Plugins
------------
-
-Filter
-~~~~~~
-
-- counter - Counts hashable elements in a sequence
-
-New Modules
------------
-
-Identity
-~~~~~~~~
-
-keycloak
-^^^^^^^^
-
-- keycloak_realm_info - Allows obtaining Keycloak realm public information via Keycloak API
-
-Packaging
-~~~~~~~~~
-
-language
-^^^^^^^^
-
-- cargo - Manage Rust packages with cargo
-
-System
-~~~~~~
-
-- sudoers - Manage sudoers files
-
-v4.2.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- aix_filesystem - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
-- aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3834).
-- gitlab - add more token authentication support with the new options ``api_oauth_token`` and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
-- gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
-- gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme = true``) (https://github.com/ansible-collections/community.general/pull/3792).
-- hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
-- icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- icinga2 inventory plugin - inventory object names are changeable using ``inventory_attr`` in your config file, selecting the host object's name, address, or display_name field (see the sketch after this list) (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3822).
-- iso_extract - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3805).
-- java_cert - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3835).
-- jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
-- keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
-- logentries - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3807).
-- logstash_plugin - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
-- lxc_container - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
-- lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``, and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
-- lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
-- module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns`` for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
-- monit - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3821).
-- nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
-- nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
-- python_requirements_info - return the Python version broken down into its components; also some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
-- svc - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3829).
-- xattr - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3806).
-- xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
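
For the icinga2 inventory entries above, a hedged sketch of an inventory source file using ``inventory_attr``; the URL and credentials are placeholders:

```yaml
# icinga2.yml -- inventory source file
plugin: community.general.icinga2
url: https://icinga.example.com:5665
user: ansible
password: "{{ icinga2_api_password }}"
inventory_attr: display_name   # new in 4.2.0; also accepts name or address
```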
-
-Deprecated Features
--------------------
-
-- module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict`` (https://github.com/ansible-collections/community.general/pull/3801).
-
-Bugfixes
---------
-
-- icinga2 inventory plugin - handle 404 error when filter produces no results (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
-- interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
-- jira - fixed bug where module returns error related to dictionary key ``body`` (https://github.com/ansible-collections/community.general/issues/3419).
-- nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
-- nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
-- nrdp callback plugin - fix error ``string arguments without an encoding`` (https://github.com/ansible-collections/community.general/issues/3903).
-- opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
-- pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
-- proxmox - fixed ``onboot`` parameter causing module failures when undefined (https://github.com/ansible-collections/community.general/issues/3844).
-- python_requirements_info - now fails if a version operator is used without a version (https://github.com/ansible-collections/community.general/pull/3785).
-
-New Modules
------------
-
-Net Tools
-~~~~~~~~~
-
-- dnsimple_info - Pull basic info from DNSimple API
-
-Remote Management
-~~~~~~~~~~~~~~~~~
-
-redfish
-^^^^^^^
-
-- ilo_redfish_config - Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions
-- ilo_redfish_info - Gathers server information through iLO using Redfish APIs
-
-Source Control
-~~~~~~~~~~~~~~
-
-gitlab
-^^^^^^
-
-- gitlab_branch - Create or delete a branch
-
-v4.1.0
-======
-
-Release Summary
----------------
-
-Regular bugfix and feature release.
-
-Minor Changes
--------------
-
-- gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
-- ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
-- ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
-- listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
-- lxd_container - adds a ``type`` option which also allows operating on virtual machines, not just containers (see the sketch after this list) (https://github.com/ansible-collections/community.general/pull/3661).
-- nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088, https://github.com/ansible-collections/community.general/pull/3738).
-- open_iscsi - extended module to allow rescanning of established session for one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
-- pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
-- redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
-- redfish_command - add ``SetHostInterface`` command to enable configuring the Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
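
For the ``lxd_container`` entry above, a minimal sketch launching a virtual machine; the image source fields are illustrative:

```yaml
- name: Launch an LXD virtual machine instead of a container
  community.general.lxd_container:
    name: vm01
    type: virtual-machine      # new in 4.1.0; defaults to container
    state: started
    source:
      type: image
      alias: ubuntu/20.04
      protocol: simplestreams
      server: https://images.linuxcontainers.org
```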
-
-Bugfixes
---------
-
-- github_repo - ``private`` and ``description`` attributes should not be set to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
-- terraform - fix command options being ignored during planned/plan in function ``build_plan`` such as ``lock`` or ``lock_timeout`` (https://github.com/ansible-collections/community.general/issues/3707, https://github.com/ansible-collections/community.general/pull/3726).
-
-New Plugins
------------
-
-Inventory
-~~~~~~~~~
-
-- xen_orchestra - Xen Orchestra inventory source
-
-Lookup
-~~~~~~
-
-- revbitspss - Get secrets from RevBits PAM server
-
-v4.0.2
-======
-
-Release Summary
----------------
-
-Bugfix release for today's Ansible 5.0.0 beta 2.
-
-Deprecated Features
--------------------
-
-- Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed in the next major release (community.general 5.0.0) next spring. While most content will probably still work with ansible-base 2.10, we will remove symbolic links for modules and action plugins, which will make it impossible to use them with Ansible 2.9 anymore. Please use community.general 4.x.y with Ansible 2.9 and ansible-base 2.10, as these releases will continue to support Ansible 2.9 and ansible-base 2.10 even after they are End of Life (https://github.com/ansible-community/community-topics/issues/50, https://github.com/ansible-collections/community.general/pull/3723).
-
-Bugfixes
---------
-
-- counter_enabled callback plugin - fix output to correctly display host and task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
-- ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
-- lvol - allows logical volumes to be created with certain size arguments prefixed with ``+``, preserving the behavior of older versions of this module (see the sketch after this list) (https://github.com/ansible-collections/community.general/issues/3665).
-- nmcli - fixed falsely reported changed status when ``mtu`` is omitted with ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612, https://github.com/ansible-collections/community.general/pull/3625).
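
For the ``lvol`` fix above, a short sketch using a ``+``-prefixed size; the volume group, volume name, and size value are illustrative:

```yaml
- name: Create a logical volume using a "+"-prefixed size argument
  community.general.lvol:
    vg: data
    lv: srv
    size: +100%FREE   # "+"-prefixed sizes accepted again, as in older versions
```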
-
-v4.0.1
-======
-
-Release Summary
----------------
-
-Bugfix release for today's Ansible 5.0.0 beta 1.
-
-Bugfixes
---------
-
-- a_module test plugin - fix crash when testing a module name that was tombstoned (https://github.com/ansible-collections/community.general/pull/3660).
-- xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError`` due to a mishandling of base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
-
-v4.0.0
-======
-
-Release Summary
----------------
-
-This is release 4.0.0 of ``community.general``, released on 2021-11-02.
-
-Major Changes
--------------
-
-- bitbucket_* modules - ``client_id`` is no longer marked as ``no_log=true``. If you relied on its value not showing up in logs and output, please mark the whole tasks with ``no_log: true`` (https://github.com/ansible-collections/community.general/pull/2045).
-
-Minor Changes
--------------
-
-- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877).
-- ModuleHelper module utils - improved mechanism for customizing the calculation of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514).
-- Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632).
-- apache2_module - minor refactoring improving code quality, readability and speed (https://github.com/ansible-collections/community.general/pull/3106).
-- archive - added ``dest_state`` return value to describe final state of ``dest`` after successful task execution (https://github.com/ansible-collections/community.general/pull/2913).
-- archive - added ``exclusion_patterns`` option to exclude files or subdirectories from archives (https://github.com/ansible-collections/community.general/pull/2616).
-- archive - refactoring prior to fix for idempotency checks. The fix will be a breaking change and only appear in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987).
-- bitbucket_* modules - add ``user`` and ``password`` options for Basic authentication (https://github.com/ansible-collections/community.general/pull/2045).
-- chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()`` params from ``self.vars``, as opposed to previously retrieving those from ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517).
-- composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649).
-- datadog_event - adding parameter ``api_host`` to allow selecting a datadog API endpoint instead of using the default one (https://github.com/ansible-collections/community.general/issues/2774, https://github.com/ansible-collections/community.general/pull/2775).
-- datadog_monitor - allow creation of composite datadog monitors (https://github.com/ansible-collections/community.general/issues/2956).
-- dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247).
-- dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946).
-- elastic callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3556).
-- filesystem - cleanup and revamp module, tests and doc. Pass all commands to ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error handling. (https://github.com/ansible-collections/community.general/pull/2472).
-- filesystem - extend support for FreeBSD. Avoid potential data loss by checking existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign command) doesn't find one. Add support for character devices and ``ufs`` filesystem type (https://github.com/ansible-collections/community.general/pull/2902).
-- flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452, https://github.com/ansible-collections/community.general/pull/2751).
-- flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521).
-- funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- gem - add ``bindir`` option to specify an installation path for executables such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837).
-- gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837).
-- github_repo - add new option ``api_url`` to allow working with on premises installations (https://github.com/ansible-collections/community.general/pull/3038).
-- gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``, ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
-- gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
-- gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047).
-- gitlab_group_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3047).
-- gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``, ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``, ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002).
-- gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled`` (https://github.com/ansible-collections/community.general/pull/3379).
-- gitlab_project - projects can be created under other user's namespaces with the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824).
-- gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
-- gitlab_project_members - added functionality to set all members exactly as given (https://github.com/ansible-collections/community.general/pull/3319).
-- gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
-- gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325).
-- gitlab_user - add functionality for adding external identity providers to a GitLab user (https://github.com/ansible-collections/community.general/pull/2691).
-- gitlab_user - allow to reset an existing password with the new ``reset_password`` option (https://github.com/ansible-collections/community.general/pull/2691).
-- gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691).
-- gunicorn - search for ``gunicorn`` binary in more paths (https://github.com/ansible-collections/community.general/pull/3092).
-- hana_query - added the ability to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125).
-- hpilo_info - added ``host_power_status`` return value to report power state of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079).
-- idrac_redfish_config - modified set_manager_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output. Modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995).
-- influxdb_retention_policy - add ``state`` parameter with allowed values ``present`` and ``absent`` to support deletion of existing retention policies (https://github.com/ansible-collections/community.general/issues/2383).
-- influxdb_retention_policy - simplify duration logic parsing (https://github.com/ansible-collections/community.general/pull/2385).
-- ini_file - add the ability to define multiple options with the same name but different values (https://github.com/ansible-collections/community.general/issues/273, https://github.com/ansible-collections/community.general/issues/1204).
-- ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove single ``option=value`` entries without overwriting existing options with the same name but different values (https://github.com/ansible-collections/community.general/pull/3033).
-- ini_file - opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189).
-- interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
-- iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user map order (https://github.com/ansible-collections/community.general/pull/3178).
-- ipa_group - add ``append`` option for adding group and users members, instead of replacing the respective lists (https://github.com/ansible-collections/community.general/pull/3545).
-- jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- java_keystore - added ``ssl_backend`` parameter for using the cryptography library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485).
-- java_keystore - replace envvar by stdin to pass secret to ``keytool`` (https://github.com/ansible-collections/community.general/pull/2526).
-- jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850).
-- jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948).
-- jenkins_plugin - add fallback url(s) for failure of plugin installation/download (https://github.com/ansible-collections/community.general/pull/1334).
-- jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556).
-- kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
-- keycloak_* modules - refactor many of the ``keycloak_*`` modules to have similar structures, comments, and documentation (https://github.com/ansible-collections/community.general/pull/3280).
-- keycloak_authentication - enhanced diff mode to also return before and after state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963).
-- keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949).
-- keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation of login events (https://github.com/ansible-collections/community.general/pull/3231).
-- linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410).
-- linode - parameter ``additional_disks`` is now validated as a list of dictionaries (https://github.com/ansible-collections/community.general/pull/2410).
-- linode inventory plugin - adds the ``ip_style`` configuration key. Set it to ``api`` to get more detailed network information back from the remote Linode host (https://github.com/ansible-collections/community.general/pull/3203).
-- lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- lxd_container - add ``ignore_volatile_options`` option, which allows disabling the default behavior of ignoring options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
-- mail - added the ``ehlohost`` parameter which allows for manual override of the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
-- maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662).
-- module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``, to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290).
-- module_helper module utils - added feature flag parameter to ``CmdMixin`` to control whether ``cmd_args`` is automatically added to the module output (https://github.com/ansible-collections/community.general/pull/3648).
-- module_helper module utils - added feature flag parameters to ``CmdMixin`` to control whether ``rc``, ``out`` and ``err`` are automatically added to the module output (https://github.com/ansible-collections/community.general/pull/2922).
-- module_helper module utils - broke the long file down into smaller pieces (https://github.com/ansible-collections/community.general/pull/2393).
-- module_helper module utils - method ``CmdMixin.run_command()`` now accepts ``process_output`` specifying a function to process the outcome of the underlying ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564).
-- module_helper module utils - added a classmethod to trigger the execution of MH modules (https://github.com/ansible-collections/community.general/pull/3206).
-- nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730).
-- nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724).
-- nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105, https://github.com/ansible-collections/community.general/pull/3262).
-- nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
-- nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730).
-- nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901).
-- nmcli - add ``wifi-sec`` option change detection to support managing secure Wi-Fi connections (https://github.com/ansible-collections/community.general/pull/3136).
-- nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden`` or ``mode`` (https://github.com/ansible-collections/community.general/pull/3081).
-- nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087).
-- nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141).
-- nmcli - remove dead code, ``options`` never contains keys from ``param_alias`` (https://github.com/ansible-collections/community.general/pull/2417).
-- nmcli - the option ``routing_rules4`` can now be specified as a list of strings, instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
-- nrdp callback plugin - parameters are now converted to strings, except ``validate_certs`` which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878).
-- onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734).
-- open_iscsi - add support for mutual authentication between target and initiator (https://github.com/ansible-collections/community.general/pull/3422).
-- open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup`` setting per portal (https://github.com/ansible-collections/community.general/issues/2685).
-- open_iscsi - also consider ``portal`` and ``port`` to check if already logged in or not (https://github.com/ansible-collections/community.general/issues/2683).
-- open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286).
-- opentelemetry callback plugin - added option ``enable_from_environment`` to support enabling the plugin only if the given environment variable exists and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
-- opentelemetry callback plugin - enriched the span attributes with HTTP metadata for those Ansible tasks that interact with third party systems (https://github.com/ansible-collections/community.general/pull/3448).
-- opentelemetry callback plugin - enriched the stacktrace information for loops with the ``message``, ``exception`` and ``stderr`` fields from the failed item in the tasks in addition to the name of the task and failed item (https://github.com/ansible-collections/community.general/pull/3599).
-- opentelemetry callback plugin - enriched the stacktrace information with the ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
-- opentelemetry callback plugin - transformed the task args into a list of span attributes; in addition, redacted username and password from any URLs (https://github.com/ansible-collections/community.general/pull/3564).
-- openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284).
-- opkg - allow ``name`` to be a YAML list of strings (https://github.com/ansible-collections/community.general/issues/572, https://github.com/ansible-collections/community.general/pull/3554).
-- pacman - add ``executable`` option to use an alternative pacman binary (https://github.com/ansible-collections/community.general/issues/2524).
-- pacman - speed up checking if the package is installed, when the latest version check is not needed (https://github.com/ansible-collections/community.general/pull/3606).
-- pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285).
-- passwordstore lookup - add option ``missing`` to choose what to do if the password file is missing (https://github.com/ansible-collections/community.general/pull/2500).
-- pids - refactor to add support for older ``psutil`` versions to the ``pattern`` option (https://github.com/ansible-collections/community.general/pull/3315).
-- pipx - minor refactor on the ``changed`` logic (https://github.com/ansible-collections/community.general/pull/3647).
-- pkgin - in case of a ``pkgin`` tool failure, display the returned standard output (``stdout``) and standard error (``stderr``) to ease debugging (https://github.com/ansible-collections/community.general/issues/3146).
-- pkgng - ``annotation`` can now also be a YAML list (https://github.com/ansible-collections/community.general/pull/3526).
-- pkgng - packages being installed (or upgraded) are acted on in one command (per action) (https://github.com/ansible-collections/community.general/issues/2265).
-- pkgng - status message specifies number of packages installed and/or upgraded separately. Previously, all changes were reported as one count of packages "added" (https://github.com/ansible-collections/community.general/pull/3393).
-- proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044).
-- proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- rax_mon_notification_plan - fixed validation checks by specifying type ``str`` as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state`` (https://github.com/ansible-collections/community.general/pull/2955).
-- redfish_command - add ``boot_override_mode`` argument to BootSourceOverride commands (https://github.com/ansible-collections/community.general/issues/3134).
-- redfish_command and redfish_config and redfish_utils module utils - add parameter to strip the etag of quotes before a patch, since some vendors do not properly handle ``If-Match`` etags with quotes (https://github.com/ansible-collections/community.general/pull/3296).
-- redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995).
-- redfish_info - include ``Status`` property for Thermal objects when querying Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232).
-- redfish_utils module utils - modified set_bios_attributes function to skip invalid attribute instead of returning. Added skipped attributes to output (https://github.com/ansible-collections/community.general/issues/1995).
-- redhat_subscription - add ``server_prefix`` and ``server_port`` parameters (https://github.com/ansible-collections/community.general/pull/2779).
-- redis - allow using the term ``replica`` instead of ``slave``, which has been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867).
-- rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- scaleway plugin inventory - parse scw-cli config file for ``oauth_token`` (https://github.com/ansible-collections/community.general/pull/3250).
-- serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205).
-- snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990).
-- snap - improved module error handling, especially for the case when snap server is down (https://github.com/ansible-collections/community.general/issues/2970).
-- splunk callback plugin - add ``batch`` option for user-configurable correlation IDs (https://github.com/ansible-collections/community.general/issues/2790).
-- spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355).
-- ssh_config - new feature to set the ``ForwardAgent`` option to ``yes`` or ``no``; see the sketch after this list (https://github.com/ansible-collections/community.general/issues/2473).
-- stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681).
-- supervisorctl - add the possibility to restart all programs and program groups (https://github.com/ansible-collections/community.general/issues/3551).
-- supervisorctl - use the standard Ansible mechanism to validate the parameter required by the ``signalled`` state (https://github.com/ansible-collections/community.general/pull/3068).
-- terraform - add optional ``check_destroy`` parameter to check for the deletion of resources before the plan is applied (https://github.com/ansible-collections/community.general/pull/2874).
-- terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
-- terraform - add option ``overwrite_init`` to skip ``init`` if the working directory has already been initialized (https://github.com/ansible-collections/community.general/pull/2573).
-- terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
-- timezone - print error message to debug instead of warning when timedatectl fails (https://github.com/ansible-collections/community.general/issues/1942).
-- tss lookup plugin - added ``token`` parameter for token authorization; ``username`` and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
-- tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228).
-- tss lookup plugin - refactored to decouple the supporting third-party library (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252).
-- ufw - if ``delete=true`` and ``insert`` option is present, then ``insert`` is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
-- vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191).
-- zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502).
-- zfs_delegate_admin - drop choices from permissions, allowing any permission supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540).
-- zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
-- zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
-- zypper - prefix zypper commands with ``/sbin/transactional-update --continue --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159).
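-
-Two minimal sketches of the options referenced above (paths, hosts, and values are hypothetical, and the semantics are assumed from the entries in this list). First, ``ini_file`` with ``exclusive``:
-
-.. code-block:: yaml
-
-    # With exclusive=false, an existing "server" entry with a different
-    # value is kept, so both option=value pairs end up in the section.
-    - name: Add a second server entry without overwriting the first
-      community.general.ini_file:
-        path: /etc/example/mirrors.ini
-        section: mirrors
-        option: server
-        value: "https://mirror2.example.com"
-        exclusive: false
-
-Second, ``ssh_config`` setting ``ForwardAgent`` (the ``forward_agent`` parameter name is an assumption):
-
-.. code-block:: yaml
-
-    # Enable agent forwarding for one host pattern in the user's SSH config.
-    - name: Forward the SSH agent for example.com
-      community.general.ssh_config:
-        user: alice
-        host: example.com
-        forward_agent: true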
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- archive - added idempotency checks for changes to file names and content within the ``destination`` file (https://github.com/ansible-collections/community.general/pull/3075).
-- lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress`` installed from PyPI (https://github.com/ansible-collections/community.general/pull/2441).
-- scaleway_security_group_rule - when used with Python 2, the module now needs ``ipaddress`` installed from PyPI (https://github.com/ansible-collections/community.general/pull/2441).
-
-Deprecated Features
--------------------
-
-- ali_instance_info - marked the removal version of the deprecated parameters ``availability_zone`` and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429).
-- bitbucket_* modules - ``username`` options have been deprecated in favor of ``workspace`` and will be removed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/2045).
-- dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693).
-- gitlab_group_members - setting ``gitlab_group`` to ``name`` or ``path`` is deprecated. Use ``full_path`` instead (https://github.com/ansible-collections/community.general/pull/3451).
-- keycloak_authentication - the return value ``flow`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
-- keycloak_group - the return value ``group`` is now deprecated and will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
-- linode - parameter ``backupsenabled`` is deprecated and will be removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410).
-- lxd_container - the current default value ``true`` of ``ignore_volatile_options`` is deprecated and will change to ``false`` in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/3429).
-- serverless - deprecated the parameter ``functions``, as it was not used in the code (https://github.com/ansible-collections/community.general/pull/2845).
-- xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should be used instead; see the sketch below (https://github.com/ansible-collections/community.general/pull/3049).
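-
-A short sketch of the suggested replacement for the deprecated ``get`` state (channel and property names are made up):
-
-.. code-block:: yaml
-
-    # Read a property with the new xfconf_info module instead of
-    # xfconf with state=get, and register the result for later use.
-    - name: Look up an Xfce property
-      community.general.xfconf_info:
-        channel: xsettings
-        property: /Gtk/FontName
-      register: font_name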
-
-Removed Features (previously deprecated)
-----------------------------------------
-
-- All inventory and vault scripts contained in community.general were moved to the ``contrib-scripts`` GitHub repository (https://github.com/ansible-collections/community.general/pull/2696).
-- ModuleHelper module utils - remove fallback when value could not be determined for a parameter (https://github.com/ansible-collections/community.general/pull/3461).
-- Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197).
-- The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address, nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view, nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup, and nios_network_view modules have been removed from community.general 4.0.0 and replaced by redirects to the ``infoblox.nios_modules`` collection. Please install the ``infoblox.nios_modules`` collection to continue using these plugins and modules, and update your FQCNs (https://github.com/ansible-collections/community.general/pull/3592).
-- The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress`` from the Python 3 standard library, or from PyPI (https://github.com/ansible-collections/community.general/pull/2441).
-- cpanm - removed the deprecated ``system_lib`` option. Use Ansible's privilege escalation mechanism instead; the option basically used ``sudo`` (https://github.com/ansible-collections/community.general/pull/3461).
-- grove - removed the deprecated alias ``message`` of the ``message_content`` option (https://github.com/ansible-collections/community.general/pull/3461).
-- proxmox - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461).
-- proxmox_kvm - default value of ``proxmox_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.general/pull/3461).
-- runit - removed the deprecated ``dist`` option which was not used by the module (https://github.com/ansible-collections/community.general/pull/3461).
-- telegram - removed the deprecated ``msg``, ``msg_format`` and ``chat_id`` options (https://github.com/ansible-collections/community.general/pull/3461).
-- xfconf - the default value of ``disable_facts`` changed to ``true``, and the value ``false`` is no longer allowed. Register the module results instead (https://github.com/ansible-collections/community.general/pull/3461).
-
-Security Fixes
---------------
-
-- nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli con edit`` instead and pass secrets via ``stdin`` (https://github.com/ansible-collections/community.general/issues/3145).
-
-Bugfixes
---------
-
-- _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883).
-- ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- ansible_galaxy_install - the output value ``cmd_args`` contained the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
-- apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message when not found (https://github.com/ansible-collections/community.general/issues/3253).
-- archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816).
-- archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818).
-- archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816).
-- archive - fixed task failure when using the ``remove`` option with a ``path`` containing nested files for ``format``s other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919).
-- archive - fixing archive root determination when longest common root is ``/`` (https://github.com/ansible-collections/community.general/pull/3036).
-- composer - use ``no-interaction`` option when discovering available options to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348).
-- consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495).
-- consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter`` and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124).
-- copr - fix chroot naming issues; ``centos-stream`` changed naming to ``centos-stream-<number>`` (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084, https://github.com/ansible-collections/community.general/pull/3237).
-- cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
-- deploy_helper - improved parameter checking by using standard Ansible construct (https://github.com/ansible-collections/community.general/pull/3104).
-- django_manage - the ``command`` argument is now split again, as it should be (https://github.com/ansible-collections/community.general/issues/3215).
-- django_manage - the parameters ``apps`` and ``fixtures`` are now split instead of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
-- django_manage - refactor to call ``run_command()`` passing command as a list instead of string (https://github.com/ansible-collections/community.general/pull/3098).
-- ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()`` for the command, passing args to ``run_command()`` as list instead of string (https://github.com/ansible-collections/community.general/pull/3093).
-- filesystem - repair ``reiserfs`` fstype support after adding it to integration tests (https://github.com/ansible-collections/community.general/pull/2472).
-- gitlab_deploy_key - fix idempotency on projects with multiple deploy keys (https://github.com/ansible-collections/community.general/pull/3473).
-- gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while running task in check mode (https://github.com/ansible-collections/community.general/issues/3621, https://github.com/ansible-collections/community.general/pull/3622).
-- gitlab_group - avoid passing wrong value for ``require_two_factor_authentication`` on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
-- gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``, ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
-- gitlab_group_members - fixed a pagination problem that caused issues when a GitLab group has more than 20 members (https://github.com/ansible-collections/community.general/issues/3041).
-- gitlab_project - user projects are created using namespace ID now, instead of user ID (https://github.com/ansible-collections/community.general/pull/2881).
-- gitlab_project_members - ``get_project_id`` returns the project ID by matching ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602).
-- gitlab_project_members - fixed a pagination problem that caused issues when a GitLab group has more than 20 members (https://github.com/ansible-collections/community.general/issues/3041).
-- idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing (https://github.com/ansible-collections/community.general/pull/2385).
-- influxdb_user - allow creation of admin users when InfluxDB authentication is enabled but no other user exists on the database. In this scenario, InfluxDB 1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364).
-- influxdb_user - fix bug where an influxdb user has no privileges for 2 or more databases (https://github.com/ansible-collections/community.general/pull/2499).
-- influxdb_user - fix bug which removed current privileges instead of appending them to existing ones (https://github.com/ansible-collections/community.general/issues/2609, https://github.com/ansible-collections/community.general/pull/2614).
-- ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875).
-- ini_file - fix inconsistency between empty value and no value (https://github.com/ansible-collections/community.general/issues/3031).
-- interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
-- inventory and vault scripts - change file permissions to make vendored inventory and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337).
-- ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560).
-- ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group`` (https://github.com/ansible-collections/community.general/issues/2442).
-- iptables_state - call ``async_status`` action plugin rather than its module (https://github.com/ansible-collections/community.general/issues/2700).
-- iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean up (https://github.com/ansible-collections/community.general/pull/2525).
-- iptables_state - fix a broken query of ``async_status`` result with current ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627, https://github.com/ansible-collections/community.general/pull/2671).
-- iptables_state - fix initialization of iptables from null state when addressing more than one table (https://github.com/ansible-collections/community.general/issues/2523).
-- java_cert - fix issue with incorrect alias used on PKCS#12 certificate import (https://github.com/ansible-collections/community.general/pull/2560).
-- java_cert - import private key as well as public certificate from PKCS#12 (https://github.com/ansible-collections/community.general/issues/2460).
-- java_keystore - add parameter ``keystore_type`` to control output file format and override ``keytool``'s default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515).
-- jboss - fix a deployment file permission issue when the JBoss server runs as a non-root user. The deployment file was copied with its content only, had its permissions set to ``440``, and belonged to the root user, so a JBoss (WildFly) server running as a non-root user was unable to read it (https://github.com/ansible-collections/community.general/pull/3426).
-- jenkins_build - examine presence of ``build_number`` before deleting a jenkins build (https://github.com/ansible-collections/community.general/pull/2850).
-- jenkins_plugin - use POST method for sending request to jenkins API when ``state`` option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent`` (https://github.com/ansible-collections/community.general/issues/2510).
-- json_query filter plugin - avoid 'unknown type' errors for more Ansible internal types (https://github.com/ansible-collections/community.general/pull/2607).
-- keycloak_authentication - fix bug when two identical executions are in the same authentication flow (https://github.com/ansible-collections/community.general/pull/2904).
-- keycloak_authentication - fix bug, the requirement was always on ``DISABLED`` when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
-- keycloak_client - update the check mode to not show differences resulting from sorting and default values relating to the properties, ``redirectUris``, ``attributes``, and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/3610).
-- keycloak_identity_provider - fix change detection when updating identity provider mappers (https://github.com/ansible-collections/community.general/pull/3538, https://github.com/ansible-collections/community.general/issues/3537).
-- keycloak_realm - ``ssl_required`` changed from a boolean type to accept the strings ``none``, ``external`` or ``all``. This is not a breaking change since the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693).
-- keycloak_realm - element type for ``events_listeners`` parameter should be ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231).
-- keycloak_realm - remove warning that ``reset_password_allowed`` needs to be marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694).
-- keycloak_role - quote role name when used in URL path to avoid errors when role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535, https://github.com/ansible-collections/community.general/pull/3536).
-- launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960).
-- launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
-- linode_v4 - changed the error message to point to the correct bugtracker URL (https://github.com/ansible-collections/community.general/pull/2430).
-- logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
-- lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370).
-- lvol - fixed size unit capitalization to match units used between different tools for comparison (https://github.com/ansible-collections/community.general/issues/2360).
-- lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934).
-- macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
-- maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- memcached cache plugin - change function argument names to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- modprobe - added additional checks to ensure module load/unload is effective (https://github.com/ansible-collections/community.general/issues/1608).
-- module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
-- module_helper module utils - avoid failing when non-zero ``rc`` is present on regular exit (https://github.com/ansible-collections/community.general/pull/2912).
-- module_helper module utils - fixed change-tracking for dictionaries and lists (https://github.com/ansible-collections/community.general/pull/2951).
-- netapp module utils - remove always-true conditional to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590).
-- nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512).
-- nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239).
-- nmcli - compare MAC addresses case insensitively to fix idempotency issue (https://github.com/ansible-collections/community.general/issues/2409).
-- nmcli - fixed ``dns6`` option handling so that it is treated as a list internally (https://github.com/ansible-collections/community.general/pull/3563).
-- nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563).
-- nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766).
-- nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli`` command (https://github.com/ansible-collections/community.general/issues/2408).
-- npm - correctly handle cases where a dependency does not have a ``version`` property because it is either missing or invalid (https://github.com/ansible-collections/community.general/issues/2917).
-- npm - when the ``version`` option is used the comparison of installed vs missing will use name@version instead of just name, allowing version specific updates (https://github.com/ansible-collections/community.general/issues/2021).
-- one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626).
-- one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435).
-- oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- open_iscsi - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3286).
-- openbsd_pkg - fix crash from ``KeyError`` exception when package installs, but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
-- openbsd_pkg - fix regexp matching crash. This bug could trigger on package names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161).
-- opentelemetry callback plugin - validated the task result exception without crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450, https://github.com/ansible/ansible/issues/75726).
-- openwrt_init - calling ``run_command`` with arguments as ``list`` instead of ``str`` (https://github.com/ansible-collections/community.general/pull/3284).
-- ovirt4 inventory script - improve configparser creation to avoid crashes for options without values (https://github.com/ansible-collections/community.general/issues/674).
-- packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758).
-- pamd - the code for ``state=updated``, when dealing with the PAM module arguments, made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260).
-- pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925).
-- pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7 (https://github.com/ansible-collections/community.general/pull/2808).
-- pipx - ``state=inject`` was failing to parse the list of injected packages (https://github.com/ansible-collections/community.general/pull/3611).
-- pipx - set environment variable ``USE_EMOJI=0`` to prevent errors in platforms that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611).
-- pipx - the output value ``cmd_args`` contained the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
-- pkgin - Fix exception encountered when all packages are already installed (https://github.com/ansible-collections/community.general/pull/3583).
-- pkgng - the ``name=* state=latest`` check for upgrades did not count "Number of packages to be reinstalled" as a ``changed`` action, giving incorrect results in both regular and check mode (https://github.com/ansible-collections/community.general/pull/3526).
-- pkgng - an earlier PR broke check mode so that the module always reported ``not changed``. This is now fixed so that the module reports the number of upgrade or install actions that would be performed (https://github.com/ansible-collections/community.general/pull/3526).
-- pkgng - the ``annotation`` functionality was broken and is now fixed, and now also works with check mode (https://github.com/ansible-collections/community.general/pull/3526).
-- proxmox inventory plugin - fixed parsing failures when some cluster nodes are offline (https://github.com/ansible-collections/community.general/issues/2931).
-- proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052).
-- proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649).
-- proxmox_kvm - clone operation should return the VMID of the target VM and not that of the source VM. This was failing when the target VM with the chosen name already existed (https://github.com/ansible-collections/community.general/pull/3266).
-- proxmox_kvm - fix parsing of Proxmox VM information with device info not containing a comma, like disks backed by ZFS zvols (https://github.com/ansible-collections/community.general/issues/2840).
-- proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid`` (https://github.com/ansible-collections/community.general/pull/3034).
-- proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists (https://github.com/ansible-collections/community.general/issues/2648).
-- puppet - replace ``console`` with ``stdout`` in the ``logdest`` option when ``all`` has been chosen (https://github.com/ansible-collections/community.general/issues/1190).
-- rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- redfish_command - fix extraneous error caused by missing ``bootdevice`` argument when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005).
-- redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- redfish_utils module utils - do not attempt to change the boot source override mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/).
-- redfish_utils module utils - if a manager network property is not specified in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/).
-- redfish_utils module utils - if given, add account ID of user that should be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
-- redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497).
-- rhsm_release - fix the issue that the module considered ``8``, ``7Client`` and ``7Workstation`` to be invalid releases (https://github.com/ansible-collections/community.general/pull/2571).
-- saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194).
-- scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with Python 3.5 (https://github.com/ansible-collections/community.general/issues/2769).
-- smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
-- snap - fix formatting of ``--channel`` argument when the ``channel`` option is used (https://github.com/ansible-collections/community.general/pull/3028).
-- snap - fix various bugs which prevented the module from working at all, and which resulted in ``state=absent`` fail on absent snaps (https://github.com/ansible-collections/community.general/issues/2835, https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912).
-- snap - fixed the order of the ``--classic`` parameter in the command line invocation (https://github.com/ansible-collections/community.general/issues/2916).
-- snap_alias - the output value ``cmd_args`` contained the intermediate command used to gather the state, instead of the command that actually performed the state change (https://github.com/ansible-collections/community.general/pull/3655).
-- snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/).
-- stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed, and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681).
-- stackpath_compute inventory script - fix broken validation checks for client ID and client secret (https://github.com/ansible-collections/community.general/pull/2448).
-- supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068).
-- svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with Python 3 (https://github.com/ansible-collections/community.general/issues/2373).
-- taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067).
-- terraform - ensure the workspace is set back to its previous value when the apply fails (https://github.com/ansible-collections/community.general/pull/2634).
-- tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk`` version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192, https://github.com/ansible-collections/community.general/pull/3199).
-- tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version 1.0.0 (https://github.com/ansible-collections/community.general/issues/3057, https://github.com/ansible-collections/community.general/pull/3139).
-- udm_dns_record - fixed managing of PTR records, which could never have worked before (https://github.com/ansible-collections/community.general/pull/3256).
-- ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
-- utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- vdo - boolean arguments now compared with proper ``true`` and ``false`` values instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191).
-- xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715).
-- xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
-- yaml callback plugin - avoid modifying PyYAML so that other plugins using it on the controller, like the ``to_yaml`` filter, do not produce different output (https://github.com/ansible-collections/community.general/issues/3471, https://github.com/ansible-collections/community.general/pull/3478).
-- yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name`` option (https://github.com/ansible-collections/community.general/issues/2761).
-- zfs - certain ZFS properties, especially sizes, would lead to a task being falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975, https://github.com/ansible-collections/community.general/pull/2454).
-- zfs - no longer treat received properties as local (https://github.com/ansible-collections/community.general/pull/502).
-- zypper_repository - fix idempotency on adding repository with ``$releasever`` and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985).
-- zypper_repository - when a URL to a ``.repo`` file was provided in the ``repo`` option with ``state=present``, only the first run succeeded; subsequent runs failed due to missing checks prior to starting zypper. Usage of ``state=absent`` in combination with a ``.repo`` file was not working either (https://github.com/ansible-collections/community.general/issues/1791, https://github.com/ansible-collections/community.general/issues/3466).
-
-New Plugins
------------
-
-Callback
-~~~~~~~~
-
-- elastic - Create distributed traces for each Ansible task in Elastic APM
-- opentelemetry - Create distributed traces with OpenTelemetry
-
-Filter
-~~~~~~
-
-- groupby_as_dict - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute (a usage sketch follows this list)
-- unicode_normalize - Normalizes unicode strings to facilitate comparison of characters with normalized forms
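-
-A brief usage sketch for the ``groupby_as_dict`` filter described above (the variable and attribute names are made up):
-
-.. code-block:: yaml
-
-    # Index a list of dictionaries by the value of the "name" attribute,
-    # yielding {'alice': {...}, 'bob': {...}}.
-    - name: Group users by name
-      ansible.builtin.debug:
-        msg: "{{ users | community.general.groupby_as_dict('name') }}"
-      vars:
-        users:
-          - name: alice
-            uid: 1001
-          - name: bob
-            uid: 1002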
-
-Inventory
-~~~~~~~~~
-
-- icinga2 - Icinga2 inventory source
-- opennebula - OpenNebula inventory source
-
-Lookup
-~~~~~~
-
-- collection_version - Retrieves the version of an installed collection
-- dependent - Composes a list with nested elements of other lists or dicts which can depend on previous loop variables
-- random_pet - Generates random pet names
-- random_string - Generates a random string
-- random_words - Returns a number of random words
-
-Test
-~~~~
-
-- a_module - Check whether the given string refers to an available module or action plugin
-
-New Modules
------------
-
-Cloud
-~~~~~
-
-misc
-^^^^
-
-- proxmox_nic - Management of a NIC of a Qemu (KVM) VM in a Proxmox VE cluster
-- proxmox_tasks_info - Retrieve information about one or more Proxmox VE tasks
-
-Database
-~~~~~~~~
-
-misc
-^^^^
-
-- redis_data - Set key value pairs in Redis
-- redis_data_incr - Increment keys in Redis
-- redis_data_info - Get value of key in Redis database
-
-mssql
-^^^^^
-
-- mssql_script - Execute SQL scripts on an MSSQL database
-
-saphana
-^^^^^^^
-
-- hana_query - Execute SQL on HANA
-
-Files
-~~~~~
-
-- sapcar_extract - Manages SAP SAPCAR archives
-
-Identity
-~~~~~~~~
-
-keycloak
-^^^^^^^^
-
-- keycloak_authentication - Configure authentication in Keycloak
-- keycloak_client_rolemapping - Allows administration of Keycloak client_rolemapping with the Keycloak API
-- keycloak_clientscope - Allows administration of Keycloak client_scopes via Keycloak API
-- keycloak_identity_provider - Allows administration of Keycloak identity providers via Keycloak API
-- keycloak_role - Allows administration of Keycloak roles via Keycloak API
-- keycloak_user_federation - Allows administration of Keycloak user federations via Keycloak API
-
-Notification
-~~~~~~~~~~~~
-
-- discord - Send Discord messages
-
-Packaging
-~~~~~~~~~
-
-language
-^^^^^^^^
-
-- ansible_galaxy_install - Install Ansible roles or collections using ansible-galaxy
-- pipx - Manages applications installed with pipx
-
-os
-^^
-
-- dnf_versionlock - Locks package versions in ``dnf``-based systems
-- pacman_key - Manage pacman's list of trusted keys
-- snap_alias - Manages snap aliases
-
-Source Control
-~~~~~~~~~~~~~~
-
-gitlab
-^^^^^^
-
-- gitlab_protected_branch - (Un)marking existing branches for protection
-
-System
-~~~~~~
-
-- sap_task_list_execute - Perform SAP Task list execution
-- xfconf_info - Retrieve XFCE4 configurations
-
-Web Infrastructure
-~~~~~~~~~~~~~~~~~~
-
-- rundeck_job_executions_info - Query executions for a Rundeck job
-- rundeck_job_run - Run a Rundeck job
diff --git a/ansible_collections/community/general/FILES.json b/ansible_collections/community/general/FILES.json
deleted file mode 100644
index 3f038dbf..00000000
--- a/ansible_collections/community/general/FILES.json
+++ /dev/null
@@ -1,25261 +0,0 @@
-{
- "files": [
- {
- "name": ".",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".azure-pipelines",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/aggregate-coverage.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "432bb55a22ee5b1b7aeb57eb6474c9ac0eb70db442456616205453d01584392c",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/combine-coverage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e34d4e863a65b9f53c4ca8ae37655858969898a949e050e9cb3cb0d5f02342d0",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/process-results.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c03d7273fe58882a439b6723e92ab89f1e127772b5ce35aa67c546dd62659741",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/publish-codecov.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d690f98e8db0d0020dbadb4d7012bf9e27c7b37bd91e3d7bce3f17d1b69b335d",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/report-coverage.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0134b8f21933faca559c36c7551eb9f7aca849a09fa575ff16627c33bd317c42",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/run-tests.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb08a3ec5715b00d476ae6d63ca22e11a9ad8887239439937d2a7ea342e5a623",
- "format": 1
- },
- {
- "name": ".azure-pipelines/scripts/time-command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0232f415efeb583ddff907c058986963b775441eaf129d7162aee0acb0d36834",
- "format": 1
- },
- {
- "name": ".azure-pipelines/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".azure-pipelines/templates/coverage.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "270c97c0b91869f4bf2ff350d9b703382d2032c1a8321e5142e75085409c87de",
- "format": 1
- },
- {
- "name": ".azure-pipelines/templates/matrix.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4fb0d3ffb2125d5806c7597e4f9d4b2af69cf8c337e9d57803081eddd4a6b081",
- "format": 1
- },
- {
- "name": ".azure-pipelines/templates/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2cfa1271f94c71f05ffa0b1f763d8946394b5636e14579cda8ee14bb38bbcf1c",
- "format": 1
- },
- {
- "name": ".azure-pipelines/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "61f20decd3c8fb34ac2cc6ff79f598fc5136e642130a7ba065ccc5aa37960cd2",
- "format": 1
- },
- {
- "name": ".azure-pipelines/azure-pipelines.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f326a40059db446b4d5ded0aa25a449b1ca889f96cd2084c2672afb2b5a5cdce",
- "format": 1
- },
- {
- "name": ".github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".github/ISSUE_TEMPLATE",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".github/ISSUE_TEMPLATE/bug_report.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8063576d1a2b7efbc22829be27042dc843eb6f3b1c1862663823aeff9c7071bb",
- "format": 1
- },
- {
- "name": ".github/ISSUE_TEMPLATE/config.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2e5f08c57601d637ec507daec616f993993d16f51892ca62214932b4fad0dcd9",
- "format": 1
- },
- {
- "name": ".github/ISSUE_TEMPLATE/documentation_report.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "085b4f03c46b46d5e370727e0c1561c24c4e1a3f625a1cf436a7e3d5649f686d",
- "format": 1
- },
- {
- "name": ".github/ISSUE_TEMPLATE/feature_request.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6b3fc179291972b5ed5b9a1f7d66db88ea95fd24ae4f84500fc253dd4f6e5ba",
- "format": 1
- },
- {
- "name": ".github/workflows",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".github/workflows/codeql-analysis.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b03191ab8e81273883b6d5eb8ac4ff0a216cd2e3a11f46c9c15553ff9f0c5fcd",
- "format": 1
- },
- {
- "name": ".github/BOTMETA.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f33ce93f8b6b7dc02243acfb94cb69b637dbe8ed7967032bfa277c4e4f61e106",
- "format": 1
- },
- {
- "name": ".github/dependabot.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4db28cbf4c9935cd6c08296f484f95441fcc58321213715df8477b63ba53f4cd",
- "format": 1
- },
- {
- "name": ".github/patchback.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f28653c2f8d2965a55f76092049c4205a9c7f828e4edbd1cd089f7dd2685f93a",
- "format": 1
- },
- {
- "name": ".github/settings.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e0381b42e525395bcf6c9e21de33e23ca8cace574b8ef85902a36ce606d3a991",
- "format": 1
- },
- {
- "name": "changelogs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "changelogs/fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "changelogs/fragments/.keep",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "changelogs/.gitignore",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "919ef00776e7d2ff349950ac4b806132aa9faf006e214d5285de54533e443b33",
- "format": 1
- },
- {
- "name": "changelogs/changelog.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc99147da9dd277a5e0718ac311034e1f0d7841a2ddc1fdf48d3d2b14738fcad",
- "format": 1
- },
- {
- "name": "changelogs/config.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a31925d687298c2d9568cd8a6083c015024ba3560d3275fd9ef0b1e9e8f6b378",
- "format": 1
- },
- {
- "name": "docs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-001_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "124727db3b24565eacc8316782db1ad61d34c66973f84aa4367fc6e8374b7593",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-002_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f8cdc658841376d10f5755fa855f6462fa50a9d84c2272393e554a786e2a0fd",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-003_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "372fa863d5e78b00aeee2f74cd394f69eeb41234667315c5b23226cf8adb2ca5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-004_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c8afda6cf2d26ae54e12b82771f759347d290c3da1a1f1d161bead215ccdb58",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-005_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2cd75720f897d6484c8cf128c3a4cee776b2ba7bfda588c377b42ea2df77769f",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-006_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6aabc04b58d2a62e22e8ba73a5ad9bc54115f1b5567c9d6fd3627f2b78f4f1e",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-007_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5a6c28ca3d0b57d1dcf91c93421b1de878b57cc74db5328080e2b89710416f5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-008_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1153c8f98613b91afc3ceed3f67d37437ccb3bff135ceb753c0a18e0b3725b0",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/default-common.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b59292184c20294d3e7608c6b970358f669d617f432f4d2ff7969a05a8560d75",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/default-recursive-true.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f716c819d3f2e7619a234a79f066165a24c34304a59c9e24b1f6f049035407e5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-001.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef524048f5428f3b554e450b33d52eba16992be3bea78f478ab2ed145658b143",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-002.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58ffe71c339fb197ff96e2310dfdd5382817f27168550881f2e899a74cdff45e",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-003.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ff4f9a245a38cc2604c0918066666bb9ee70f1ffaab7dd024605c865a8a4712",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-004.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32a3d7c2172e552a9242406b89ce18069e3402326d433774da71eafccb3b6a7d",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-005.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "841054e97bab645c017ad516606400d7f38b93e99b1b2de512c9184ce55c5aeb",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-006.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e770a5fefc343eee1eeeb1d196dc83c65ad980ca560bd6931ed8155f1be72213",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-007.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a90d6df4eb06a73e032b977783ef63fbca76639565e6ba88c72e91036fb2c74",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/example-008.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9483a524f54b54f2b85382ca5f25fb3dc404b83cd69a095632d2097590d8ce31",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/examples.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bf392d2344f6e1e286ff3f963f4cd911a7a5d64d00e5892b99f457b0aa2e5180",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/examples_all.rst.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3715738ea49e65ee4be377dd05879d368f30a2524c85f65cbc47d5c0e1f90c5",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d66e2b303f2aa06758a54e7fbf8145fcd2d3026e7a4de4e6bec1bcea72b10a6",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/list3.out.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06392cd62d44858d2ebe996653f3ef1177118e40fc49da734f92105ed17f2454",
- "format": 1
- },
- {
- "name": "docs/docsite/helper/lists_mergeby/playbook.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8822928d0e5ff719ed6b6f79ead4b319428244e71af2716d7f3407d938b758d4",
- "format": 1
- },
- {
- "name": "docs/docsite/rst",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5dd351082133b92222d674bb84a7492b3914b2fb892ac0d529c3179a20960d1e",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_abstract_informations.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7ac0c375a05277db0cb3877a2858d320838b25179be76255ddacdfc9ebbfda6",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27eb1e21de980780e97423c7517b2f1fe3ce2f3e7feb13e8101dd6ed03b12021",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee3b7aa7d966d6589d128753c66cfb17a71129de7a97dd5227121b4d439d9c94",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_abstract_informations_grouping.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3212e2c984c60a97aa8b7120797299433f91979a29f7d16cfc8d29e3c83279cb",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efd8b7219cdc908d82c696bb659676911abc6df882cc8119dfc425c61e90a7b6",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_conversions.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6781fe08e818dca9601c2f5e8604ac42d23a32747e31b1a4c57af8f8f5c9c86d",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_creating_identifiers.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44f1bec70452d5e3bcf6f9e090a4c31e554ff1a4f113e82249ce880f02ed3292",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_paths.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ef986b82c012433f93e354da4e4ff014a4f6adb80d1b859435acc013f588d26",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_selecting_json_data.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a1c451d18cd6d5e202f9328598c3432f816270bfc097956888cdf7db6da258e",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_working_with_times.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "447f2f740573115ce6e67b9c885d86cb1619f49173c23135c633aa47f6035100",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_working_with_unicode.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "452dc71c1cc3073d9272179ed5670a892c54c7a32901e1c8e8e5ccca6f6c710d",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/filter_guide_working_with_versions.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fd43a5e7c1176513c891f7e1969676fc0ee62f2a7f2bbec17214e4f89b569484",
- "format": 1
- },
- {
- "name": "docs/docsite/rst/test_guide.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c5039a3affa314b180ab9ee304c1516bda3ea7309664d7743b205be539e90db",
- "format": 1
- },
- {
- "name": "docs/docsite/extra-docs.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5ced61d7909c0f08262fbb375ced0dc6f75f8ded8c20f432001ddbf37fab47a",
- "format": 1
- },
- {
- "name": "meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "meta/runtime.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55147edd582b4723e052ff22be6afde181ce16a1d0d9bc453edadbc05a4f48e5",
- "format": 1
- },
- {
- "name": "plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/action",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/action/iptables_state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e67b7ebc904eb63f86c197d95409f0483f23f805f5a0816191ed3b8546f474a",
- "format": 1
- },
- {
- "name": "plugins/action/shutdown.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c14173d1cb5da24064a3e0ae193caf7e27170f30c18c9d1dce6040a9529b1d9b",
- "format": 1
- },
- {
- "name": "plugins/action/system",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/action/system/iptables_state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e67b7ebc904eb63f86c197d95409f0483f23f805f5a0816191ed3b8546f474a",
- "format": 1
- },
- {
- "name": "plugins/action/system/shutdown.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c14173d1cb5da24064a3e0ae193caf7e27170f30c18c9d1dce6040a9529b1d9b",
- "format": 1
- },
- {
- "name": "plugins/become",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/become/doas.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7417aa750f35679d63d98e5a2d7c0a2a1fc999b779b00a78a22e5d92879d3b56",
- "format": 1
- },
- {
- "name": "plugins/become/dzdo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6fe0252fe0993b2d11c3ee4db0ca1fef4eda6453da6658bf7c08aa39a8a47b2",
- "format": 1
- },
- {
- "name": "plugins/become/ksu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9df0c91601626ebbb74475f1c264db19f5aaf6d0100d0c173f89214db480ba69",
- "format": 1
- },
- {
- "name": "plugins/become/machinectl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "65f5821799afbcf2492a3732734a71a508d7c138ad65820c57ea10291dd58b90",
- "format": 1
- },
- {
- "name": "plugins/become/pbrun.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75a5d44918f3fc4b9b95b3dcc8f268632728c9f6d84611e3c1381dfc46b5fb68",
- "format": 1
- },
- {
- "name": "plugins/become/pfexec.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b467c2a91d39a4dae3de752e304e9350b6cf3b59f3133961de2375919f0cf52b",
- "format": 1
- },
- {
- "name": "plugins/become/pmrun.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0fcc2a009001fd94145011ac79ddb5560633edc690c8a61a0e321d9a52d65d87",
- "format": 1
- },
- {
- "name": "plugins/become/sesu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1256af9d7fdef2e198354945d3dfa9e91b026be4d745fa9230945869d7c282e0",
- "format": 1
- },
- {
- "name": "plugins/become/sudosu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45fe5493856a68a9c849b294f69ea32527d2357978acc3829ffd64c045cc6e1f",
- "format": 1
- },
- {
- "name": "plugins/cache",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/cache/memcached.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06c967f9a9b9079174bee6fd175c27298c7618c0047df1dea170acab4feb2cfd",
- "format": 1
- },
- {
- "name": "plugins/cache/pickle.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c90e85c3e31a5389b36e499a533f1df83aa2557dd202f9defe4218edc0441d92",
- "format": 1
- },
- {
- "name": "plugins/cache/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d834deaeb93249049ab3adca4fae0c71817b2ca5bfd5a6b65e04a82c3d8ed16a",
- "format": 1
- },
- {
- "name": "plugins/cache/yaml.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "384760144b4430feb9d9ab7ca90c8d3e3cd72d4cee2baf09e302f2730de116e6",
- "format": 1
- },
- {
- "name": "plugins/callback",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/callback/osx_say.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55a7c838e72b1969ec94bad4afefc9da350e21e7f96cdad3f6d7e2a758c2cdbb",
- "format": 1
- },
- {
- "name": "plugins/callback/cgroup_memory_recap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c78c0cd15d78c87c7b24db3f64bf5d7a609d3156081923b8fe823b129e333af8",
- "format": 1
- },
- {
- "name": "plugins/callback/context_demo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d537228fd64c0872560a9885d23802b8a2fbb3776c15c374dd6d6eb8d1021c4",
- "format": 1
- },
- {
- "name": "plugins/callback/counter_enabled.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8f9c4ca899b057e804fde54ce1203de367e7277e631b89c907dddc71df0da27",
- "format": 1
- },
- {
- "name": "plugins/callback/dense.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b54b9ad005054a39bf3e97550263859b91b2e31d3663c4d5f71373b48cd7133",
- "format": 1
- },
- {
- "name": "plugins/callback/diy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "831aa82c12a345a06b29cf3ca6dc23c89b80603ea79a05adc03a602d8ccd7eb9",
- "format": 1
- },
- {
- "name": "plugins/callback/elastic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea637f02072dd92bd942d49b5d970e3c5693a3b461f70c8fdc5872c553a9948c",
- "format": 1
- },
- {
- "name": "plugins/callback/hipchat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e363830af498823fe22e96671c5f6c5c725c13f6b499aba2f3980151816d5559",
- "format": 1
- },
- {
- "name": "plugins/callback/jabber.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "774ea134645b688b3cbfc1a2460859420d8fc836df4ca1f989255ee5bc831891",
- "format": 1
- },
- {
- "name": "plugins/callback/log_plays.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89c06df364e2b945ccc5a29b4e423d40c77cd8a49e8f9df9631c12a166171202",
- "format": 1
- },
- {
- "name": "plugins/callback/loganalytics.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e761d5a9a8a2f94597fa10f7e4d74dc92618d2a642181e6ac9d605c4a97c4ff",
- "format": 1
- },
- {
- "name": "plugins/callback/logdna.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2bb93c4197c2fef50aceafba9bbe69d9f6f769fe03add3e34ab19159d23c86e7",
- "format": 1
- },
- {
- "name": "plugins/callback/logentries.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "39947c41dda2778fe14bebd157b5fcd46e974cfe35920d5a4a21c3d04ef3d9db",
- "format": 1
- },
- {
- "name": "plugins/callback/logstash.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "105116672adbb9137f73822d07372373fe8666d4c716a15c4821e470ed2dadce",
- "format": 1
- },
- {
- "name": "plugins/callback/mail.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "886722a8b4ea343316eab7c1f8439f693ee7bc56efd5ab452a3e9222478cdc5e",
- "format": 1
- },
- {
- "name": "plugins/callback/nrdp.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22f2697963c0c64eaa67d877955a6e1fac87b5d18d2ec42f75022c5051a118af",
- "format": 1
- },
- {
- "name": "plugins/callback/null.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d11ed31f94e426af5c0676353ca67fc24270f7ed8f5a2f43cadf99c5f3b7b30",
- "format": 1
- },
- {
- "name": "plugins/callback/opentelemetry.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd36601aee1e44919ef754413cf10a3c191b83ebd2fcbedc5cdd2ac2f4dc892b",
- "format": 1
- },
- {
- "name": "plugins/callback/say.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55a7c838e72b1969ec94bad4afefc9da350e21e7f96cdad3f6d7e2a758c2cdbb",
- "format": 1
- },
- {
- "name": "plugins/callback/selective.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c947c8f7b9129cc4e44ae76af714fb9bf700031f8da738893dd68804592df370",
- "format": 1
- },
- {
- "name": "plugins/callback/slack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11b70ce2822efe57ebb9839a2172c09e95f3466fa0fbc100d1e4dc025feabbe3",
- "format": 1
- },
- {
- "name": "plugins/callback/splunk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea1a28f51e7574c5f22dd955a2a1d37bd8d3a0381be83e27b1ab530e561f57a1",
- "format": 1
- },
- {
- "name": "plugins/callback/sumologic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e8d06234d12a2b4fd38510a86159ad3d9e163f97739c56581f595254f4de64d7",
- "format": 1
- },
- {
- "name": "plugins/callback/syslog_json.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83af94d297e9dfb004d89167b04e26563721087dfb66c4665f4536def3fa6e21",
- "format": 1
- },
- {
- "name": "plugins/callback/unixy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4dd32eff531ebed7499a6e94812f1a0d1b93f697361bca14fbea1c75f840b632",
- "format": 1
- },
- {
- "name": "plugins/callback/yaml.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69838b12ded886a7663173e305dc10143baf8a8dfb51458cc4d323728d5c318c",
- "format": 1
- },
- {
- "name": "plugins/connection",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/connection/chroot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "322bae37ba470f073f786622847d83e2a3b2a170349e8511a1500907ee7be3ba",
- "format": 1
- },
- {
- "name": "plugins/connection/funcd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "190eb5364ac8895b86b364860544f3a9a28cf77ad1f406e089667166ac5cf8c4",
- "format": 1
- },
- {
- "name": "plugins/connection/iocage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "260bfc99a34252f9a38a19b1738814f3dc6aee1aa15434f0e963dcc014f32381",
- "format": 1
- },
- {
- "name": "plugins/connection/jail.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "239beb53a3ddf81a855f194f2ffe50133b2d2bb5f80c7ca2fb1673220f022d4b",
- "format": 1
- },
- {
- "name": "plugins/connection/lxc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "852b490a76392ae10e3ec3ddddd7a6efd66cf67718076cf5694c0c314fe6273c",
- "format": 1
- },
- {
- "name": "plugins/connection/lxd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2758cf8b7b184104def140b85ccffd7240aece938c2211a890934ed5445496a9",
- "format": 1
- },
- {
- "name": "plugins/connection/qubes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc1e03e94beb0355bb25daa42ad8ea14549aca47ee296ad7c7d91fc301db42c8",
- "format": 1
- },
- {
- "name": "plugins/connection/saltstack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "281ea091d1b347f5d82e31b3a43d75d8a4e83a5557793c74c8e3dfd1b74556bc",
- "format": 1
- },
- {
- "name": "plugins/connection/zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5933c5f126170dfe744347811c69efe60e14022ff12684c6b12481c85c8f4ae",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/alicloud.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "678f5b64368f51cc5ff9bbbac69c2b722cba9408176523d0a874eeec0d2d8c46",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/auth_basic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34740866c7bdbcaed75b7d4414e978ed24a2e05424e4a2af4e179fded67ab950",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/bitbucket.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e68bd78cd3a1bc6c4b17428e6ba9e0b3761f4ede3d0a1ca6964124d47c9e476",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/dimensiondata.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a34334fca7e874a3c5381729e38804f893a49869e664b5098d2340074526b15d",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/dimensiondata_wait.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "854970d12c42e53dd93c01cc6c00f4561c29503a0cb50f8ba7d74d85abb67047",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/emc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8e4738088f16fdf47cec032f3046c5dce8a520a06d4e3c47e4f8d7bbd978599",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/gitlab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48058c3ef0154703e8184c3d51345cf10f3f805aef57c936727253270474cba0",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/hpe3par.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed065527eb18c4d72c5ab95a898bad90882cafcfff03be3f22823779ce8b9a1d",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/hwc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb061c5a72b42d57ec94959856ade7a12394b95cabcf7b6c64af2339876611b6",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/ibm_storage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa82aa6805f786bbffc1c2d0740fa230852373ce44a38a7af28e7f880f998e61",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/influxdb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb088065bb558d8bd0fd38facbd3e569a75cf2350ff54bddee7ec87d25f3391a",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/ipa.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f952ad0bc01198e8db6f21143273782cab239d5e6acc035fd4606e0aabbfed2",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/keycloak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a48e78b6913388c7e758243910eedd30ec2319e0d7ed4aae71def7bf865929b8",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/ldap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e21d098d79bb479a2fa47e655cb2fba729fdfb233a4bf2a638fe2703ebb479da",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/lxca_common.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f3c7661305f8b89b2e4a611bfbdf08a9ca2585d90fe0b156747eb45d1d6a09c",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/manageiq.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d04400f964b445c2ba413c5028e62f5e4411c7daac5ee7c520c9da8a4adc1fb",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/nomad.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7bfef29c982c903cd3263b446dcd28eed54bb5f5834a3a848425f478634fa9d9",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oneview.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b56ef83547e88ad7eb9f15732dca7f8eac1a4e82b5146ef8f783d3fcc5e94d13",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/online.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a23dcec58b0d6fabcde691111466b6ffc6db5d8034de41f4da96eeeef3789a35",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/opennebula.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c09c661f3141cee65af3798e6d8e99097cc80a61acd36ad52e41a7b12ba6b5f6",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/openswitch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "803130c2ab3075b5553a20bb57cc40db502f37699f9e0e90d23539a1d04f45f1",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "53102f2a2e5a1b856ace02593803940baff8d355a346ee41a66d46fc918ef066",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle_creatable_resource.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d5149fc41043c8566f45e188a4d7ea02641ef62e176cabf079da16d145f6fbf",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle_display_name_option.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0639a4b83c44df4fb5af0e879f0d8ebfeccaf2d385cd5be7128da41ca52bd8f",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle_name_option.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "01bc8f3275fa439749fcb06191ca97459f66137434ae38979ef0c62cc56c1be9",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle_tags.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36a057fbe4873583cf0983070d75552a3d2db084e11c6f9b47d1e51585749df9",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/oracle_wait_options.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "808e217393514cca1cd235ccbb80dfd09029af85325edca13f63f352c2f11e34",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/pritunl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cf7ab079e36719f73c8a7598062b3e4f7b0d2a2f55e7e2b92e170b6e5ca1541a",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5909e654a318bd6a6ec20270a7e12b61a26d67df1447d05b50846acf0df5022f",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/purestorage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5ddc57c545438fa417cd600b412758153396222b802ec28968f57b11f6031cb8",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/rackspace.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22456616c25433541723ad90c6fb91b09fa52a2c1bf925c93cb5cb7dcd73f2cb",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c867d37c83553d1d8b4ab1e2f0ddc9f5f313047df27e0ffe9fc73807a66ef2ec",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/rundeck.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1128f3948d9ef91920987b76b7f70fc3b36b416376528c0d5a5e3568384543c1",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/scaleway.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45b9ab99f962f8c87c100b113c98ccac2789142220e55851c1239bab43c657cc",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/utm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c595fd6936490911e09c041bf49e93933278c65a838896b95075432a3a7e6acc",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/vexata.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8039bbcfe59a10d3db8062a7bf6ec797a0cd96d5d1163635fed5db388670d9a",
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/xenserver.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "84cffcdae284ac8290d7f2865f536fc13bd35b4cd31d4a5eaeef89493f53b64d",
- "format": 1
- },
- {
- "name": "plugins/filter",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/filter/counter.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "594012ed8bf9714c030858fa25bab1b950161c0a2ae683fc009eac580bdc79f4",
- "format": 1
- },
- {
- "name": "plugins/filter/dict.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37f731a7b4ef003d88eac100709c097a7522c8cfb27688ea95565cb096b226ed",
- "format": 1
- },
- {
- "name": "plugins/filter/dict_kv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "162301ca8d64b1366113df22068b7b9a150610f5ab13023beb846c176863bf86",
- "format": 1
- },
- {
- "name": "plugins/filter/from_csv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "121f47a203202d50e035cb54fcf119e710c7557a894745d26c5643733db130e1",
- "format": 1
- },
- {
- "name": "plugins/filter/groupby.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "530d6dc0f8f500d68393de2f744261fb20e1457f01183af02fbfe461c890acb9",
- "format": 1
- },
- {
- "name": "plugins/filter/hashids.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4e5740394a9f9d7f8b1333e4b200b60cdf8eea18aa70bae28ae7fcc9df70562f",
- "format": 1
- },
- {
- "name": "plugins/filter/jc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ee84bed62711c60d2c0ae37056801b389f1ce1fe588efe0f3f9b566a74144a6",
- "format": 1
- },
- {
- "name": "plugins/filter/json_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1754fc223cf8315816d846798dad5e9a07daef8e1b6adaa282b15afa3ca48983",
- "format": 1
- },
- {
- "name": "plugins/filter/list.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7767a0d9040ded2d5c06474aa51c858c1c6bf97786b26f9c30f0614feab5e905",
- "format": 1
- },
- {
- "name": "plugins/filter/path_join_shim.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "af24e5338d8ad56ed65bbdbccc59ab1b3d688085c42051e935fb1ef1b009dbea",
- "format": 1
- },
- {
- "name": "plugins/filter/random_mac.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1907c71d6eb92015868e1bc393ca39a5ebe312759a41a629eb48be54b65fee43",
- "format": 1
- },
- {
- "name": "plugins/filter/time.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80a25fc2ba18f1ce7a68017a1f5af5435f40eee159c07be188f1fc51b3818d73",
- "format": 1
- },
- {
- "name": "plugins/filter/unicode_normalize.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "434484e773bc02a9aed066c7166b8f6d30a937b22c7f177cb4a5cee5733b3e08",
- "format": 1
- },
- {
- "name": "plugins/filter/version_sort.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "818111a99a79f02fa838f8340897b914a84304b1ff24fb77eb33f0a6757e948e",
- "format": 1
- },
- {
- "name": "plugins/inventory",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/inventory/cobbler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9a6b423e69705b2bc5a4bae307cba2bd41f3d9ee94c398846cbf1fb33df7a509",
- "format": 1
- },
- {
- "name": "plugins/inventory/gitlab_runners.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f490cc333192da437f9ca31881b14db49cd3c2b44d5481d126463968e65f700",
- "format": 1
- },
- {
- "name": "plugins/inventory/icinga2.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd2b8abe72349f95ba1b23d5daac2e148ed50c80a4ccb4aad31658d93b2bd1b5",
- "format": 1
- },
- {
- "name": "plugins/inventory/linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "490656d2d37ed9d1499662d5a2a9269a18922ce40e1611441450fe31cef73d35",
- "format": 1
- },
- {
- "name": "plugins/inventory/lxd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b8de32e26a01c46eb80084c968b2f1c808f5bbb76a5569ec1746e9191a380e9",
- "format": 1
- },
- {
- "name": "plugins/inventory/nmap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34512b9e6ebaf0d368d03d2bcbf9d184a6b796210a71d256976dc2b4ec5e1c3c",
- "format": 1
- },
- {
- "name": "plugins/inventory/online.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4d33407094a14d875e37b46acaae929212d70046ee3e90a9376795a678fd59e",
- "format": 1
- },
- {
- "name": "plugins/inventory/opennebula.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b464e03664f7e72776d52cbac1abdaf6358b303cc726d1cd4196fe92542d4522",
- "format": 1
- },
- {
- "name": "plugins/inventory/proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d517b140e791d044c6dc3c34b7be7a1ae6e3a0045f11f85ee0627f3e3b7f46b",
- "format": 1
- },
- {
- "name": "plugins/inventory/scaleway.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed112e682df95e0eb4432d7824951e60f0908ba09c3f5612df114ab442baad97",
- "format": 1
- },
- {
- "name": "plugins/inventory/stackpath_compute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "086a7a3c09bbd181750bb397399495d30acee51484ab0930ac2b1f1ccf1a44af",
- "format": 1
- },
- {
- "name": "plugins/inventory/virtualbox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7dd10c1c99e7486ea054780cea690dd122df85b200b97853ae528bb2cdd85cb",
- "format": 1
- },
- {
- "name": "plugins/inventory/xen_orchestra.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64120b597c9c0946705db83a4159f8714bac9da25cd5716f9dfbf55247b6d7a9",
- "format": 1
- },
- {
- "name": "plugins/lookup",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/lookup/cartesian.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "601cdd5c19a9f962bfb78eff1140e06143d22253cc42c3352dc3535d56727010",
- "format": 1
- },
- {
- "name": "plugins/lookup/chef_databag.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc62bc09202aa868006f820cb54ca8081f1839a98b65ff4eea643832cd8d984e",
- "format": 1
- },
- {
- "name": "plugins/lookup/collection_version.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ecce1b17933bb8618696167c68794e0f285ab0254556857379a3438846b3393d",
- "format": 1
- },
- {
- "name": "plugins/lookup/consul_kv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8776417592ea1e3a4902cf5062270d4a72fecd556b58d87ae3a1390dd7fec00",
- "format": 1
- },
- {
- "name": "plugins/lookup/credstash.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da6715b6fa5fadbe9f7bf0f6f3941bd420d6c95d8c93f1af08d14d68cd509b16",
- "format": 1
- },
- {
- "name": "plugins/lookup/cyberarkpassword.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49f79f62f69b96251569e967cc3e5444352f8fd93f3425bd9df49dd34a76a488",
- "format": 1
- },
- {
- "name": "plugins/lookup/dependent.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "05ffac09abc7b434be1ea9d4f24ec8ddb06a1e014f91bb94fa0542abf3d139dd",
- "format": 1
- },
- {
- "name": "plugins/lookup/dig.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c283980b0e8038c1be6f6a41091266c1e486ba3096b5ab702674cdb956790a7",
- "format": 1
- },
- {
- "name": "plugins/lookup/dnstxt.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d09c809d3862854e8899785110027dc3478ed1fdec294bdcf88218caacc0fcf",
- "format": 1
- },
- {
- "name": "plugins/lookup/dsv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b358f5ee14615d293267f67a70d5586437a3571f5e62890e2751793f63802967",
- "format": 1
- },
- {
- "name": "plugins/lookup/etcd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "98fe425dcdbe482ea1dabe239041b098cd5782f253f29d764023e908d2d72b5d",
- "format": 1
- },
- {
- "name": "plugins/lookup/etcd3.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6dfc208189b24a892a0039a75003388d37629a4364bb592e5c6a83d1907c5f4",
- "format": 1
- },
- {
- "name": "plugins/lookup/filetree.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0354e0643410f2eab47a1d92f0500779a9bb1e189ef353494e0585abab136938",
- "format": 1
- },
- {
- "name": "plugins/lookup/flattened.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efc7d3e2f66fe14c0b392ae86fc3dd911a88fb27ae95af94b944e74a9b6873e5",
- "format": 1
- },
- {
- "name": "plugins/lookup/hiera.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed4305bb68b4c75ba0d85b4a81c2f26406335df9e38ad39c1628bdb5c4985df2",
- "format": 1
- },
- {
- "name": "plugins/lookup/keyring.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44734661d6389b0d36cdc184bd2a31c7acaf0e3dfc962b66c68889b3c4b0c244",
- "format": 1
- },
- {
- "name": "plugins/lookup/lastpass.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b03df385693176d05d1c8476ebe517c20c5d940addbaa291979232d21949d8f",
- "format": 1
- },
- {
- "name": "plugins/lookup/lmdb_kv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f731830e02139284d7e0fe7268aaa0477ed7ef71edbc6f824207ef02adf5f412",
- "format": 1
- },
- {
- "name": "plugins/lookup/manifold.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "12f925dae69fa1397e6b2991497d926cafbbe26909e53bb32483500108837680",
- "format": 1
- },
- {
- "name": "plugins/lookup/onepassword.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2ee9db11ee7c83320d4d782844075a8cb850cf342b3e0d4590c00df975762be",
- "format": 1
- },
- {
- "name": "plugins/lookup/onepassword_raw.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "745ef11eb06162d4b1d7eaed2ff0167862c690009f590e990745963d5306190c",
- "format": 1
- },
- {
- "name": "plugins/lookup/passwordstore.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c18082005704e6154682ab99c1454dd78be164f8005e4cc03a928aedbde48d9",
- "format": 1
- },
- {
- "name": "plugins/lookup/random_pet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37bf4ac9d16cbfe47ab0e2dd79366d501ce7292b39a4b99557fd1f24d16d9541",
- "format": 1
- },
- {
- "name": "plugins/lookup/random_string.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d1e85d9752a4c1441caa617558296569cddbb2d898cabacf769cb2a8f5ac113",
- "format": 1
- },
- {
- "name": "plugins/lookup/random_words.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc0f56ec2996fe8016e9de309cae58f1d77206820740bcb4a75cd8603aa0c4f3",
- "format": 1
- },
- {
- "name": "plugins/lookup/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c8ec26f3669649cc04347ceb8c9d255df4bc610a7785e4b45770f12ec6d0742",
- "format": 1
- },
- {
- "name": "plugins/lookup/revbitspss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63dba93df50106fa2781e584db2ba5d16a87b982a4927ece21d9d8ec1304b2d8",
- "format": 1
- },
- {
- "name": "plugins/lookup/shelvefile.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "037c25ee46d4e4702e4893eee1afa2fc99366485d278997747e5b5758fc96dee",
- "format": 1
- },
- {
- "name": "plugins/lookup/tss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6eb4f8b5253c68a19479b23f19de02c03722065396b996ddc5dc8facaac2ff43",
- "format": 1
- },
- {
- "name": "plugins/module_utils",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/identity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/identity/keycloak",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/identity/keycloak/keycloak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f53ff61bda7067e63b100dc96f1516f98d21cae8dee06530c630ac7a8ac6b5b6",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins/cmd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93dd46c75ef7fb7c459a2635e262f7fdbbfafccc170ae0b28f2ec9c978eea99a",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins/deprecate_attrs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dff6ebcc0c94de20c94d90143682bf29c619a15a70b185378ab8918ae7c1e658",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins/deps.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25cd3ff114d835d74e5065bb13cd1ad6401f02674eb007570423245248a53c57",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins/state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b7973ac3e0e31ceb5f3864c50baa4f130820714c69c82e24b1e2329f637eaf9",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/mixins/vars.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73ecfc76056264c5d683ef0b207e65c631a3160b23436340e8e0859bdf2f39c3",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/base.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2829bb2e209d6e264910506727a0579afdf891b04c751d60fcfd5308bd0e0856",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/deco.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11a2e78b057f9d0c1b6aae0012273737a230b89a1b748689076c19311e06d19f",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/exceptions.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87593b77ff79dd644b6d815ed269b743b1f425f9e689b8dfe928138b2f957b73",
- "format": 1
- },
- {
- "name": "plugins/module_utils/mh/module_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15968853409eb2166c53e3ab6f4d3331fe92bf70f1e1a39e6d6e307a6114cdde",
- "format": 1
- },
- {
- "name": "plugins/module_utils/net_tools",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/net_tools/pritunl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/net_tools/pritunl/api.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "154a0510cdd87e8e3559582f1f9125186d3f15596d78e9b9b3fa713f223dc338",
- "format": 1
- },
- {
- "name": "plugins/module_utils/oracle",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/oracle/oci_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "532c951a79e9bcfe9c8066214d668e3bb69800d4081d9b368e736f5641df4783",
- "format": 1
- },
- {
- "name": "plugins/module_utils/remote_management",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/remote_management/lxca",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/remote_management/lxca/common.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da683cda97e3b7089a56a4cc74e92a17d15d122fc3539437c68fc640be012e4e",
- "format": 1
- },
- {
- "name": "plugins/module_utils/source_control",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/source_control/bitbucket.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "172c5a258df5efbf9200ab18706aeac4a1707268de0c0725f906f98bc4ddac0e",
- "format": 1
- },
- {
- "name": "plugins/module_utils/storage",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/storage/emc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/storage/emc/emc_vnx.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "78c9bac2963bbac14c05810618ab082b46049456cc35be571235fb4fd0ff0466",
- "format": 1
- },
- {
- "name": "plugins/module_utils/storage/hpe3par",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/storage/hpe3par/hpe3par.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b978e81816e639956697e866ea1249e3187b1500440a6bfe723d896fa64bbee8",
- "format": 1
- },
- {
- "name": "plugins/module_utils/_mount.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8919224fab73d91d655912c780866109fe811ee9b34af3d36c72f663fbb3a8d2",
- "format": 1
- },
- {
- "name": "plugins/module_utils/_version.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25726217922373642c280d10e4949b6424c22ce3067e22e5a1d343e9943d54c0",
- "format": 1
- },
- {
- "name": "plugins/module_utils/alicloud_ecs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c6d509c6f6c58594ff0b0813872a65478c19315af52c86ec334132ffd003159",
- "format": 1
- },
- {
- "name": "plugins/module_utils/cloud.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1691ca71996ba84bcc8abd35da7647705035f1e390672fe7962dc502617d8a4f",
- "format": 1
- },
- {
- "name": "plugins/module_utils/csv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4cc33c9b0881219a255af4669ed2ead6b8675769e3fc9a94ce3a17d6706202c1",
- "format": 1
- },
- {
- "name": "plugins/module_utils/database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ecea17a2a45cb079354e6bcc8bf043fcd3373f103f0a9c4aaeec22bdbbf5b0a",
- "format": 1
- },
- {
- "name": "plugins/module_utils/dimensiondata.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa6a1917eb2c3cf04159130fac79f317e84711d539a7f2ab9eeeccc7df7de2ba",
- "format": 1
- },
- {
- "name": "plugins/module_utils/gandi_livedns_api.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cbf2e1b3dc0fa2080a2b36a2bb72979af470f60c5c012a6ae9f35b0fe22a8d40",
- "format": 1
- },
- {
- "name": "plugins/module_utils/gitlab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e0270a52a05723162d30353adaf7d1f89068c3b3a69dbf840317b48377d5279",
- "format": 1
- },
- {
- "name": "plugins/module_utils/heroku.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77b1c51c9c98a9093a964de5991e379a140002513c1b42df54a127a2e81d6a99",
- "format": 1
- },
- {
- "name": "plugins/module_utils/hwc_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e644ea3ca36f30764fcaa369072fb87e3c7d9b0188b8570b0902fbdbd8bcc010",
- "format": 1
- },
- {
- "name": "plugins/module_utils/ibm_sa_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "606f1d1dd9ff8a739c706157de668ea6df79273aa6545509645eb4791032cc70",
- "format": 1
- },
- {
- "name": "plugins/module_utils/ilo_redfish_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c8f188c3f2e6143eaf92794baec80f9c63823daf4baee92d6336b15396b6be7",
- "format": 1
- },
- {
- "name": "plugins/module_utils/influxdb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fb2d2b67ce38172c9265adbebc86590a2f1bf36a348f1d17bc179064de838044",
- "format": 1
- },
- {
- "name": "plugins/module_utils/ipa.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2db62c7f034d2d5b085cdf375bfc010b7135a33556e207941bd14be26edb6352",
- "format": 1
- },
- {
- "name": "plugins/module_utils/known_hosts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6968081e28f64f7c22c48804c0990762ee18d77f31c46dbe1219391670e6485",
- "format": 1
- },
- {
- "name": "plugins/module_utils/ldap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b8a7bdb65f124dbe79c02b8bb4592a72f42ca059c7f43a451730b79a82b3120",
- "format": 1
- },
- {
- "name": "plugins/module_utils/linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "835ba5286602cb6678f067f4523f1b884e6ba588e52de900b08bacd8bfd41884",
- "format": 1
- },
- {
- "name": "plugins/module_utils/lxd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a84494f0f3f7053e3c0ec2edda3dae59bb1b543d0d459291c4b9701e12c5d672",
- "format": 1
- },
- {
- "name": "plugins/module_utils/manageiq.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2f385d2563979f768448c3c902d3aaad71c6f5e85f8dcc6aad36c5ce361c6fe",
- "format": 1
- },
- {
- "name": "plugins/module_utils/memset.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b699e7d7a3189e0d0085a39fdc164c9bd31862be047d3221014eeb8eddb2d07d",
- "format": 1
- },
- {
- "name": "plugins/module_utils/module_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6bf2d8179c5353f2df6628282d37075b7ef71912ea56ec7ff56c296c3c481281",
- "format": 1
- },
- {
- "name": "plugins/module_utils/oneandone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a41305aac71c57950fe1139e541f4913009e90fcfe097062bf6370f95ae54d0",
- "format": 1
- },
- {
- "name": "plugins/module_utils/oneview.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f15b7e3b0511dfdc47abb0844e355af8e0e6d274c4151ff6285460af9baf3ac",
- "format": 1
- },
- {
- "name": "plugins/module_utils/online.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b33524e4c4ec9a89b937e3f98834af94fbaa42d710d759b8c127240d2c034e68",
- "format": 1
- },
- {
- "name": "plugins/module_utils/opennebula.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ddadc44b0351b1e71914fc01170ca17558e156e06ee08131e881e797d939dd1",
- "format": 1
- },
- {
- "name": "plugins/module_utils/proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a758afd9dce948cc94205d44537689ac5304a1ad5b6c0a720edfebe2c2b7ac5b",
- "format": 1
- },
- {
- "name": "plugins/module_utils/pure.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60df81c9c2b060371eec008cff84670b87c339a4d414f3ae55f55ace604f5d76",
- "format": 1
- },
- {
- "name": "plugins/module_utils/rax.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20cd4c7581094c22183254263ede05d70e44d915ea54e98c39be0f7bad2f7f9b",
- "format": 1
- },
- {
- "name": "plugins/module_utils/redfish_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "292bbafb6fac2bf23c2cc28426701c2d0d55ec6065914ff2655e3b1564805cc9",
- "format": 1
- },
- {
- "name": "plugins/module_utils/redhat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19c5d942349922f542caaa782e540ca036a9119dab63577ffc6f88f2e0f9151b",
- "format": 1
- },
- {
- "name": "plugins/module_utils/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9157e11b9949e354cadd630919844e4e8bb89f542b239598dc8385fe2566e58a",
- "format": 1
- },
- {
- "name": "plugins/module_utils/rundeck.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "753a1aa0bea535ff317f43a74d9f636871a505db8533ade49b3505bc49290609",
- "format": 1
- },
- {
- "name": "plugins/module_utils/saslprep.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc03619992801129d5aacabd6caca116a11077a3270d1a8934213a0f870725af",
- "format": 1
- },
- {
- "name": "plugins/module_utils/scaleway.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb222ed5ba99758851c55db8678d4516bce88c2635daac6455b228c0428fe634",
- "format": 1
- },
- {
- "name": "plugins/module_utils/univention_umc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b2485af98b53118b37e49002e8da0a69fa91d397b440e108304edf6729b80080",
- "format": 1
- },
- {
- "name": "plugins/module_utils/utm_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22494870f3b67a64b503bfc8fd7d71fa1a12b903d7e2ccbcb242b2ab4c72a06c",
- "format": 1
- },
- {
- "name": "plugins/module_utils/version.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f8a9655670ac2b2c3e2aae452324c5f283721bfe5bc971957ac5836bebad61b",
- "format": 1
- },
- {
- "name": "plugins/module_utils/vexata.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c303edbc99d8703b5a185586b36b6dda7da695f60b8939be16ed9a314471ccd5",
- "format": 1
- },
- {
- "name": "plugins/module_utils/xenserver.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36332210967cd3f4aa4a29b79653319a9f513a3583779dce8af9e25f9f5cac8e",
- "format": 1
- },
- {
- "name": "plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/aerospike_migrations.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52c1641f7f943c727a0d6b8eab2b292b010d9347f28396adc4e8c75159dbb08f",
- "format": 1
- },
- {
- "name": "plugins/modules/airbrake_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6aa925fba8833cbaa4a23775684646db31a7f1410c4688392ced89db20bbcade",
- "format": 1
- },
- {
- "name": "plugins/modules/aix_devices.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a",
- "format": 1
- },
- {
- "name": "plugins/modules/aix_filesystem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "292ff33ccfbcaaf28dc4cd67f6b749dc6b06ae1aa72db436245d348946c19bf7",
- "format": 1
- },
- {
- "name": "plugins/modules/aix_inittab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2",
- "format": 1
- },
- {
- "name": "plugins/modules/aix_lvg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "633b5243b9ea9b21d80f381a9698f140586e3a39310d21fb83ef8b5aa0d350cb",
- "format": 1
- },
- {
- "name": "plugins/modules/aix_lvol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea",
- "format": 1
- },
- {
- "name": "plugins/modules/ali_instance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980",
- "format": 1
- },
- {
- "name": "plugins/modules/ali_instance_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34c5d0b44fc32a43160e9c62290e1afecfe73481f22b9a9ce8b444c4517112de",
- "format": 1
- },
- {
- "name": "plugins/modules/alternatives.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "507ab83ed8cc3718318b5de58d67eb743ad0318eab406441eaefd01a5eb18dd1",
- "format": 1
- },
- {
- "name": "plugins/modules/ansible_galaxy_install.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7662c68a2cd0beb854eb1cb47411a4b5bf7004acfa0cd101898aba88c0afd6a",
- "format": 1
- },
- {
- "name": "plugins/modules/apache2_mod_proxy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d5fe445448cb9e4605eb0fe5c84e599ae353ecb8a256729b0510392d4fbbc4e",
- "format": 1
- },
- {
- "name": "plugins/modules/apache2_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4dbb4a1e3308a693aaa3101faa828015f66a6a65e040cf3a9a2eee417800d6b0",
- "format": 1
- },
- {
- "name": "plugins/modules/apk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709",
- "format": 1
- },
- {
- "name": "plugins/modules/apt_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2",
- "format": 1
- },
- {
- "name": "plugins/modules/apt_rpm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e8b8b0d6893fe18ae148144e7ce1e816a07cd760ef60511dcb230c0559b4e433",
- "format": 1
- },
- {
- "name": "plugins/modules/archive.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a0715d0aae4143b1f42dc73f560afbfa85782c37ef1645840e27400da7534d3",
- "format": 1
- },
- {
- "name": "plugins/modules/atomic_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13baf1b70fda761f06be5d8de58290518bc8707287af37fe1af641284fb504a5",
- "format": 1
- },
- {
- "name": "plugins/modules/atomic_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef3911802c6f970e9014cb8fd849be9df1f8e897876fc9cce03cd66e7d3a2e5f",
- "format": 1
- },
- {
- "name": "plugins/modules/atomic_image.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd25dd2258096e58d9d2873a382e9e5f530cd6224d74325c5466a829f9f6c5e2",
- "format": 1
- },
- {
- "name": "plugins/modules/awall.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c",
- "format": 1
- },
- {
- "name": "plugins/modules/beadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07a418d4d0b40c72721627f7c49bc9f2e6c780247e9f101bfa57c79bf18bbf6f",
- "format": 1
- },
- {
- "name": "plugins/modules/bearychat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8",
- "format": 1
- },
- {
- "name": "plugins/modules/bigpanda.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcc88a1f79d5f53d3fe5e69d911a01177f063a9aa52428c22b4564d306f35ec4",
- "format": 1
- },
- {
- "name": "plugins/modules/bitbucket_access_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36c0e727d4cf7e57a1ccb7f712ca472f3ed20a8c0b5afa656c9461d39b948ce1",
- "format": 1
- },
- {
- "name": "plugins/modules/bitbucket_pipeline_key_pair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4b8d0fe0f4ada9e881cc1e76e9365bbac7d35f0650235b9033037482d1e5670",
- "format": 1
- },
- {
- "name": "plugins/modules/bitbucket_pipeline_known_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd5b27ae648269aab81d3ac46036fc6288781c2a77c02db480ea66ba1bc1445c",
- "format": 1
- },
- {
- "name": "plugins/modules/bitbucket_pipeline_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3409614c64334e483f093a3f094fab692d09aaac0db65da0225337e4db2993a0",
- "format": 1
- },
- {
- "name": "plugins/modules/bower.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214",
- "format": 1
- },
- {
- "name": "plugins/modules/bundler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8afe9744c027374c7bb7fce88ed55069f27cbf040447a5f0f04a04b9053012b",
- "format": 1
- },
- {
- "name": "plugins/modules/bzr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "127a4d24fb7ecd0ae8286c7f1eb5332ca2e3217e7ac29ed85c1e814eb7cfeebb",
- "format": 1
- },
- {
- "name": "plugins/modules/campfire.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea",
- "format": 1
- },
- {
- "name": "plugins/modules/capabilities.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713",
- "format": 1
- },
- {
- "name": "plugins/modules/cargo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bba289036c8d3d677f768224f9eed512badd2d001089ab783be6f5a8f5e868a5",
- "format": 1
- },
- {
- "name": "plugins/modules/catapult.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e",
- "format": 1
- },
- {
- "name": "plugins/modules/circonus_annotation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57172616325c7ece221ed3f154e59473f1bfe52c802dcaf0fe0f870133f185b8",
- "format": 1
- },
- {
- "name": "plugins/modules/cisco_spark.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02",
- "format": 1
- },
- {
- "name": "plugins/modules/cisco_webex.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_aa_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "767f1e863c93bfe0e8d3bb37d7a029384caec1cf41eebde2c6ce60a864feb5c3",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_alert_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e07b52737a3326a3debf36f5d38fc1fa33503b8fd7156f5f1fb19035a8f379",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_blueprint_package.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52d3398cae86c645575a688a7f9dccccbd60b51d69743fdf2e64be70535c75e8",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_firewall_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef30311f37991878811921a4ece22412e4c94e92527e9d93d2f761efbfca658a",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "636a3b3a90bb1d9fd744e2a22f3ad42a6a372df6ffd9f2aef92e606391ecaee7",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_loadbalancer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87e5dace3e225dbd78b375a034bf5b582a4af0ba05b9276b1bf92caa61a8f5d5",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_modify_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "250d51c8692ee01ef2b75c9da4327adeaf79934aae75a942c45807a66ea9de62",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_publicip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b183d61dc5fb36caf1424935c1915fe087322d608bcfc0211a84b56053e0555e",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c7b6c85a2f14f4caab7d170ea0204f87428a5116e21eb8dffd4bcee26540111",
- "format": 1
- },
- {
- "name": "plugins/modules/clc_server_snapshot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8dd73687f3268d52da21504f88fc735fbf4a0761655db9693486a46b24263a16",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud_init_data_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a45eaa4abec3de3c7d4f0bc9338ed79308b522c2cca5496671da197901688986",
- "format": 1
- },
- {
- "name": "plugins/modules/cloudflare_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92ca2752e2212e77e6cc3a089a6a72f2a20983ebed40c8edf0e1ceaf18ace10a",
- "format": 1
- },
- {
- "name": "plugins/modules/cobbler_sync.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0a69b0d481ff28ea1a5d848fa8b80f9a07a4ccf3a50b3fd384b588d0184a31d1",
- "format": 1
- },
- {
- "name": "plugins/modules/cobbler_system.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b4d8ac045e7b8cfadaea593081d4e6bd815492162d6a0a105041563e593827f2",
- "format": 1
- },
- {
- "name": "plugins/modules/composer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f2740d5b0c235ca97fd503e4441274bc748d4c5b0dcbe3e227831599f573734",
- "format": 1
- },
- {
- "name": "plugins/modules/consul.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4118f4c040b4c3255e9b585aef388871098bb6da386ef3dfb6eff2a62621b7d7",
- "format": 1
- },
- {
- "name": "plugins/modules/consul_acl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6f145e052de83a3d5fcdb12fcc783b7c14b42be19bee84b021e28bdd5e4d2b6",
- "format": 1
- },
- {
- "name": "plugins/modules/consul_kv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "470aac4466c9a747514dcc73b3c50cbab8649050de192563f35d0054820d60ae",
- "format": 1
- },
- {
- "name": "plugins/modules/consul_session.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc7f0c964b98a2bd770173babef63981ba77fdba3581f31d844caa7aaf2fe723",
- "format": 1
- },
- {
- "name": "plugins/modules/copr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee22d4a8ae70df45b23c47432192ba596568b8ff2ddb225c7c7908b08f316c5d",
- "format": 1
- },
- {
- "name": "plugins/modules/cpanm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "869b73609aa1f1ba8f2d33ccfed04eec450bcdcf31b710526f2d043aa97c0ea4",
- "format": 1
- },
- {
- "name": "plugins/modules/cronvar.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "14583a0612a939471168bd5d59e7edac48bb01d024aa0d0fc7cdeffd0e923178",
- "format": 1
- },
- {
- "name": "plugins/modules/crypttab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d020cd305a432f0da349b1243d96fba57a3290b456016dbf7480cf6ca3dd9e92",
- "format": 1
- },
- {
- "name": "plugins/modules/datadog_downtime.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4671fae964f84c50e802b97fc64b2fa39173f787741887a6772d6a300184b69",
- "format": 1
- },
- {
- "name": "plugins/modules/datadog_event.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "203ee66689572ae405f692c6a34b24d12da75ef835feaf512ee25f179e204077",
- "format": 1
- },
- {
- "name": "plugins/modules/datadog_monitor.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c1c03834a375f842171002ac31ef4204c4830eb41283263b954704e23353d66",
- "format": 1
- },
- {
- "name": "plugins/modules/dconf.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca342ed1e3cae2da6bc5ee31e05db30f23344f75e4c68a06f577d24ddde2347a",
- "format": 1
- },
- {
- "name": "plugins/modules/deploy_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d29a73dd509521790e2dcfde24498ea2967bbb5a4c659d26c8a91f41c1cc231c",
- "format": 1
- },
- {
- "name": "plugins/modules/dimensiondata_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4adadccb01c1cef01fe7d330d031c733cf61079bf28f82cab9f260d02355eb8a",
- "format": 1
- },
- {
- "name": "plugins/modules/dimensiondata_vlan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e",
- "format": 1
- },
- {
- "name": "plugins/modules/discord.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4526e01b8b1989fa6bd10ad53702eb0115d7e9d213caa2ddca59d86b521af84d",
- "format": 1
- },
- {
- "name": "plugins/modules/django_manage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be65d011c47d6222a81d1b82af3f9e2cd5853f174c60494cfcc1930009e315ba",
- "format": 1
- },
- {
- "name": "plugins/modules/dnf_versionlock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb392c313d8a04369b834a4320c70110311fc1feaef6d58852659dacc682d6d2",
- "format": 1
- },
- {
- "name": "plugins/modules/dnsimple.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0dbb97d863fd4a2fff967c39ea1ea12c18f525db25090b6de23239a7ee1e859e",
- "format": 1
- },
- {
- "name": "plugins/modules/dnsimple_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd640688d78766e01ab5ff644b82807ee3af3114a8195a482a7f8a6773a32d64",
- "format": 1
- },
- {
- "name": "plugins/modules/dnsmadeeasy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66",
- "format": 1
- },
- {
- "name": "plugins/modules/dpkg_divert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83eb8748719f999e73a1e00bddc2ad0c4fcff0da7d1771feba9e7d1402f260dc",
- "format": 1
- },
- {
- "name": "plugins/modules/easy_install.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14",
- "format": 1
- },
- {
- "name": "plugins/modules/ejabberd_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92c3d42c1eb1126af9f9bb8c118c0a08f28f599c057a03a254b03e76b370614a",
- "format": 1
- },
- {
- "name": "plugins/modules/elasticsearch_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09a6283b244e18cdd17f34bcbf8dcfea1c85c7aeba635e033e4b1d7475f4d484",
- "format": 1
- },
- {
- "name": "plugins/modules/emc_vnx_sg_member.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdf6c7c0da78522f40ac8678ad94e2088374f137927b412b36c5b538fd257453",
- "format": 1
- },
- {
- "name": "plugins/modules/etcd3.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eca366113dd69573ccb5c95250ceedfbbec34523cc23ddb2406e3ee9bab01e75",
- "format": 1
- },
- {
- "name": "plugins/modules/facter.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616",
- "format": 1
- },
- {
- "name": "plugins/modules/filesize.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "181ff76460418648e0b4dd3906d3d7699eb7ebe08eb2b532aa57a295ac06237d",
- "format": 1
- },
- {
- "name": "plugins/modules/filesystem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00db45139f32500f03fdb8b276664e856ee2bbd3e48e225d0bc5d3ab0adaedc1",
- "format": 1
- },
- {
- "name": "plugins/modules/flatpak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77856cfeb650ab5930a8af1eacf9b87d3c654c0041c713daf6b3f6fe85c4a9ea",
- "format": 1
- },
- {
- "name": "plugins/modules/flatpak_remote.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0694a7aeb1878ffe91f91625b645d9fb6391dae6e57bff17dd106c83c6e9505a",
- "format": 1
- },
- {
- "name": "plugins/modules/flowdock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e",
- "format": 1
- },
- {
- "name": "plugins/modules/gandi_livedns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93cbd36bb0cb57ab866445984eec096389e81449ede51e141b22284eada70326",
- "format": 1
- },
- {
- "name": "plugins/modules/gconftool2.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e5a59c14afe686e07a8595a7f102e632ee78d2dc90749bd147e87b8906ef113",
- "format": 1
- },
- {
- "name": "plugins/modules/gem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2658234014600b059931be2658b92731a7b317a49ad8b87b7a90f4021d2b92af",
- "format": 1
- },
- {
- "name": "plugins/modules/git_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4df0f064e3f827b7af32547777bec982cf08b275708cd41bf44533b57cfefcb6",
- "format": 1
- },
- {
- "name": "plugins/modules/github_deploy_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d942e6c9a4fc0c0b2ab2b6cfcbb2067b044956b0cc8e3a4eb8908fceeca4308",
- "format": 1
- },
- {
- "name": "plugins/modules/github_issue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c71ba6cb604c76b2200e68acff20cf55e167b5fbc111aa68a6efd0b6b0573977",
- "format": 1
- },
- {
- "name": "plugins/modules/github_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe0c5fe85830fe7c1bfdcf99cdbc14af5366e29b04eeed1cf551092734279801",
- "format": 1
- },
- {
- "name": "plugins/modules/github_release.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0feb5df29b4556ddae70b101a78da6127312803680504c61739b57b4008037c",
- "format": 1
- },
- {
- "name": "plugins/modules/github_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46c5064a6ffa00ff6971115414370a5e49a5dbcef106f18c16a89428e6691fe0",
- "format": 1
- },
- {
- "name": "plugins/modules/github_webhook.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "781a9ccef57e174ddfba6f794b147aa941b53959652a3fbfb9c38b37d4dec4a1",
- "format": 1
- },
- {
- "name": "plugins/modules/github_webhook_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f2d091ba64877de90900c03df4412db8b71393e0d5a742202feda625c05398a",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_branch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "922b6c30c67ddb2acf0d28aaa9ab16dce5b1f6ad270223ec6773ef680e35c746",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_deploy_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43f0d1631cc651c15a935e280f31677805aae6efb6d80b95d21511b8fe4f79ea",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f566f0df7ea3a6d02b4fe0e8550d06400ac926d3d6a24975582c680d3a52528",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_group_members.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10e9d62d1291f8ca28d2dd9d40d67a10028713c53530f516490edfb2187d3644",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_group_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1394fda09fbc289cf2716876d6a5463889abeb5d2ceea2915235dfbf29aa4684",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_hook.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdce5a96cd31d9444b1841eb9ee396683c70ee3eb50634d2f02c38ce07b374f6",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba5e593304a1bb3dce94dab2cc62470a892eb3a039b1e6f99a95869d59c093b",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_project_members.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1a3075b6dd2783cf000979cdff99bf7b4f785802ed9e6e08002f629cc1a8efa9",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_project_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48faf16faee67ab8516ea6b0b7052cc272208325f8c8602c2f013b4384d2eef9",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_protected_branch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "95ed01ee57390473707b05542cd73dfbc4ff729c5be435222d74ec4b16502435",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_runner.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63967e029ff266796082e00ef8263369f5a684b01213308f62d35be1d8c65926",
- "format": 1
- },
- {
- "name": "plugins/modules/gitlab_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff0e35d6b34eb457ba640265b41f35bb6fcf335328eb3155f6e3318f12067dd3",
- "format": 1
- },
- {
- "name": "plugins/modules/grove.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b743647c9e91e766f9d75ca332fce7f1ee2d53f1a60c25e30aa1da8c54fc42fd",
- "format": 1
- },
- {
- "name": "plugins/modules/gunicorn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c0fc574bc49deaa348708e90945d2b44c5ec61d22f3919022bdc67c105666cd",
- "format": 1
- },
- {
- "name": "plugins/modules/hana_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0503130e11a7444e652e67b08fce9b7ae64fe7e14b201857822558538274387",
- "format": 1
- },
- {
- "name": "plugins/modules/haproxy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e406159197e286963c9b16223af8602f7347cb22dc6f02345512b8ab2e1ddc38",
- "format": 1
- },
- {
- "name": "plugins/modules/heroku_collaborator.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a540ae7b336b9ceb5b55d841ae1c8aa86b43da70501a51a7eafd576c59a888fe",
- "format": 1
- },
- {
- "name": "plugins/modules/hg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "142f44f349abfc23bfda7f9f2df47d160f2a97446d7d5d31749fd5eab7adab37",
- "format": 1
- },
- {
- "name": "plugins/modules/hipchat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46ca51483cbd2b779fba4a7a938d4b2e4088eab98423a196588dbf5c83287e90",
- "format": 1
- },
- {
- "name": "plugins/modules/homebrew.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "066bf7706d89a85f64b0cf890adc84f4ec37b23291b883c12c73e5b2b80a5c03",
- "format": 1
- },
- {
- "name": "plugins/modules/homebrew_cask.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2512568adbfbca7a18574b57f68cdf599ea10b5deabab628182ad98c4a71836f",
- "format": 1
- },
- {
- "name": "plugins/modules/homebrew_tap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1d8e1a616a2527b3677f208677e9a1261330777aba1acffa03f093d84f2dc84",
- "format": 1
- },
- {
- "name": "plugins/modules/homectl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b770717fcdd6ce98d6b74d1d050fe20ab9278e7a4d2862882afef34ed3938feb",
- "format": 1
- },
- {
- "name": "plugins/modules/honeybadger_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "291189d8cb646f5837e39daceeebfd8e54b4f806430deea58c4d54eef50ab709",
- "format": 1
- },
- {
- "name": "plugins/modules/hpilo_boot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd",
- "format": 1
- },
- {
- "name": "plugins/modules/hpilo_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "293b316839408346f2c2c0123d90b40c8f609e82a12246c202bc3843fc811d80",
- "format": 1
- },
- {
- "name": "plugins/modules/hponcfg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc4939e4db789e57dd8b72fa79789b5f5004b98b3a3e4e5ad2a1ab370d6ce274",
- "format": 1
- },
- {
- "name": "plugins/modules/htpasswd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a9e50c4e8fff4250f074d11041a587ae773629bc33fd8082a1c28c68c99c1b0",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_ecs_instance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_evs_disk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_network_vpc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_smn_topic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_eip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_peering_connect.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_port.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_private_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_route.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_security_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49f9184ecdc9dcc89addc51cd8490746fb3a54089d403f4fb1c64a6f7516f264",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_security_group_rule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997",
- "format": 1
- },
- {
- "name": "plugins/modules/hwc_vpc_subnet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "846c2e2161c51130505d8caeef87178eb8cd40b5fe42d9f9c6649b444f0d7c7c",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42574cb0750d740dcbf3dc300cca235b15a22ecb00f79af5aa7818a494b60366",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_host_ports.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc5ac76115dfd50d5b8b37aa9de8c75824e6354a4aa925a171a364dd0fe60fbb",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_pool.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1d51e21c6dc90ebea2e67c86200aa7c28b8451bd09c35cabdd5d53123cc1b35",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_vol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44582854ca8e702de67f555704e9d3b007ece65d723bb24536a567e9e7031757",
- "format": 1
- },
- {
- "name": "plugins/modules/ibm_sa_vol_map.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7a90662d294fcc853121b02134446a6ae10c430a5caf3ebc0766de0cbba6479a",
- "format": 1
- },
- {
- "name": "plugins/modules/icinga2_feature.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d",
- "format": 1
- },
- {
- "name": "plugins/modules/icinga2_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46b696ade815c4a19e928de8ca0ecdcfe20754bf55cd1f5ace8554daaded778c",
- "format": 1
- },
- {
- "name": "plugins/modules/idrac_redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "879b3d5825eb59bc67aea7014006f58df64853f8bff388fbb2b7d0bcb67b71a7",
- "format": 1
- },
- {
- "name": "plugins/modules/idrac_redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24cbee078205ddcf90266adaec93635a38384d7f3ea4db3a8e0adef7e69b05c9",
- "format": 1
- },
- {
- "name": "plugins/modules/idrac_redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "820bb9a147f15fe41bffc5567f699b0f000db2869f2ea268f8e630250d95bd42",
- "format": 1
- },
- {
- "name": "plugins/modules/ilo_redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8546cfb15f05947f7c6760cb5d67928253269aa18102155f600995d3598b739",
- "format": 1
- },
- {
- "name": "plugins/modules/ilo_redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d175b3b05e25ed30302b1ce7994099a19b07709201c864ff37f210aa7df96ac",
- "format": 1
- },
- {
- "name": "plugins/modules/imc_rest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e51c5d1375a1a9f469cfc28140144116cb29c3bfa35c459708f6ac76895340d0",
- "format": 1
- },
- {
- "name": "plugins/modules/imgadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208",
- "format": 1
- },
- {
- "name": "plugins/modules/infinity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "639c7ed7633b97041cd61f657ec7d60d28db516cab49fac6c0cfec5a01c013de",
- "format": 1
- },
- {
- "name": "plugins/modules/influxdb_database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f7f03aa049ab52e4dbfb809c86a65d026f518047de475693616d52a611090cc",
- "format": 1
- },
- {
- "name": "plugins/modules/influxdb_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a8b781c48ea54c78d2a8ac358ccb5f901746e79b0d0da842b5d06068ce6b1c8",
- "format": 1
- },
- {
- "name": "plugins/modules/influxdb_retention_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00fba18126835c5c2e9e79ad1a3e0fea04613c9718839ce304bd5fe48a0450de",
- "format": 1
- },
- {
- "name": "plugins/modules/influxdb_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6be29679e39cd622bb5eeaec56a6d802992a2e76a66a1058d478fa72ecef3db2",
- "format": 1
- },
- {
- "name": "plugins/modules/influxdb_write.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5e2d773ee043f148680048a538b3a61d529ea7628b431149ca7f8c51057dbf6",
- "format": 1
- },
- {
- "name": "plugins/modules/ini_file.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca49a57202bf72b8b079bbbcf5cfd3e33e530e549bd1ca1626f328a11b8b2839",
- "format": 1
- },
- {
- "name": "plugins/modules/installp.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18",
- "format": 1
- },
- {
- "name": "plugins/modules/interfaces_file.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25e134950671398223e77965d70780612354f1f321ef3b196377b8fe734adb03",
- "format": 1
- },
- {
- "name": "plugins/modules/ip_netns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7703c45b7a46aea0d992130cafc0922dc74d926266b8f908adc15c6eef1cfa29",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8338f390c1e9ac774c095ada6731502c1280e30b01bef293a6651ad54d0bfe8b",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_dnsrecord.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "88fd68dcfd0725e575ce7fac94cb8eb9c74024e83bb0eb5dddec34d568725ebd",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_dnszone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9944ce41cae935b07410a1a482d2d4cd1c6f07f7060a360e6888e67992075a36",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70c065752e9e80713862f8fb3fb85f60219ac80d97a49139288bf6dd335ad168",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_hbacrule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8350663990ec7b9b46879f317760e64e9eb9ad080170f8a3ab66f26022623cd5",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1672d2a59433c0c823dde1d227c7d78caaf492f981d55c6333ba950ba298907c",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_hostgroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae6569872367a3b15727facea24ff4322cdf35512b1dcd8c4889997943eeb1d8",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_otpconfig.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcd17661ce19b040683bbecd506bdb2ec5ed2909c20d71c0a814bb4f05fee345",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_otptoken.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffaa1a58c973d8794d9a1797bd75bccbae783699e1ea87d4bbb7b3ed434d72d4",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_pwpolicy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91f450bc4c6329e67cdf920e7f8499ffb7d27975b0a548ae2110354ed5e2e281",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24e469a9d45178e0fbdfb4635f525640cd1033ec559f45978e4ba7cc42fb95c6",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_service.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3177e872cdf023c8a7e8bd65bd09e2ac102b2c3565c40ee5dc9d8c0fd8ddfcd6",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_subca.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "932c8bd910f72a6fd20831704f96358bfd3b96e94ff8346a09a5c401a27087b8",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_sudocmd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58d95fc267fc9d319ff05df6aaab1fb39df187d48bed52d497d92a30c54750ff",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_sudocmdgroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8fbc39a66b0356ec18f8468789e6d4ffb5a1fae4f0e6d68e8837821d2c138f9",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_sudorule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15ee194ba2afa0982721aed91fdc69f93aee33b45af426efea615e3a03016f51",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97c135b60e1aca5fc78d7af59cbf5f5dbe14b0ccd93951bc10450698596c1aee",
- "format": 1
- },
- {
- "name": "plugins/modules/ipa_vault.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2ee238e7dab861eec17312d74cd513b493ec69b41e0d225501c8668d61837d2",
- "format": 1
- },
- {
- "name": "plugins/modules/ipify_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3cfe7e782b99e108e034ad45b38f3a686bd057c13a405e13b4082c9d4655ba8",
- "format": 1
- },
- {
- "name": "plugins/modules/ipinfoio_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffefdf9402a767ea1aa17675b8be1d868d68e71ef5292b26ea0266a856914208",
- "format": 1
- },
- {
- "name": "plugins/modules/ipmi_boot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32bc6fd22d5a4705022af7af389209a8db051bd7994c24e233261bc8188234b3",
- "format": 1
- },
- {
- "name": "plugins/modules/ipmi_power.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad505007f78f7588bc403a75c522ef4ff75de4b7acfdee4dfbce33aa29713e26",
- "format": 1
- },
- {
- "name": "plugins/modules/iptables_state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06358c739fcc70ba79d43af924c0f35a6920d8c5bc4292c14f96dd5870b8d4f7",
- "format": 1
- },
- {
- "name": "plugins/modules/ipwcli_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27f69f073ce4bd49b82bee81a74f81650a89517936b723a1641f203c281ac406",
- "format": 1
- },
- {
- "name": "plugins/modules/irc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5056a0944304be0cb4585231a68496ecfc2df86c3013ba1b398a17d73ece48c9",
- "format": 1
- },
- {
- "name": "plugins/modules/iso_create.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e22d344094cca4e10a77f281172b99e2ff51c71d16f63db2088d4cb5cca1dcc0",
- "format": 1
- },
- {
- "name": "plugins/modules/iso_extract.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e148bea9a28b93070734fe860f594c56b645deecd5799fcea67e8ac6c8d0e2",
- "format": 1
- },
- {
- "name": "plugins/modules/jabber.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38e25af68e39cb333fe7d46308e6798e9884c5df4feb3d99a9b5c55e8a264709",
- "format": 1
- },
- {
- "name": "plugins/modules/java_cert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c40619fd173dfc758e1dbe6ad2083a924a6b138592fb98244b3d7a152dbbb54",
- "format": 1
- },
- {
- "name": "plugins/modules/java_keystore.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f2b9a344962a24cc2754aa948d60b383fbb21dfb7be36fb4cf2582fdfd896cd7",
- "format": 1
- },
- {
- "name": "plugins/modules/jboss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "413a5203f4d159144142272b5e494f10d032d589d31b0d5167b60ab0e5d40664",
- "format": 1
- },
- {
- "name": "plugins/modules/jenkins_build.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a70f3860a8a4adf2ab17cc214be4812d8e72fae7ba2a748fbbbe9bb9755178b",
- "format": 1
- },
- {
- "name": "plugins/modules/jenkins_job.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "289f71c98eae7a1138cb3b922f1b7a431d3cf593ef838ff7f152c5ff60839a28",
- "format": 1
- },
- {
- "name": "plugins/modules/jenkins_job_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb90242a9999203cb2fa1d6af3e9a8c54ad57530e91aa338f00cee8fd7a4b32e",
- "format": 1
- },
- {
- "name": "plugins/modules/jenkins_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9f36ba039a959f4ab537e6736021dbb68c50ed10e7ee3eaad03307c5726155e3",
- "format": 1
- },
- {
- "name": "plugins/modules/jenkins_script.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "194b41bc5b511c44e15b770526dcb63625ec530b963e650343467f12b5a083ee",
- "format": 1
- },
- {
- "name": "plugins/modules/jira.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "072dfce83798a6ca7fb0c0395e8d8168ca28b140857ef73687bcfc04ebe00941",
- "format": 1
- },
- {
- "name": "plugins/modules/kernel_blacklist.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22cb952a459ea253cfd9eaf5d6612dabe02cf670385d9a95e0ad8212b8496b1c",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_authentication.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c90b1d14c16a6a61e114fcf81cecc8a37c0205d45328b3a2d37e4c26f89bbd1",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_client.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6afcc0997e09859e999b6988fc8313c2b6ab6881593c32202caffb9a00d4e8d9",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_client_rolemapping.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "591f181bff4630f8102b105189ff5b3a13de126520d1d28def344d175527979b",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_clientscope.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5afc1453d8f5360849ee0c3290c0c838f0aada90e1812928e77a1b1e7a5ffd18",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_clienttemplate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c950ef71abd6035f3861bc568f993b414bf1a24e163c7f486ae529ac5a92cb24",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49d81d24c71674584f1a762d4db1f73d7a13ba78fc367f3961e6e2cafe0c5329",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_identity_provider.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2d458b33b61e2972f529be3fc2b9818bc0bb9511fd2ad1833b8d0ee11032261e",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_realm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ddd98908cb2d26b7a3627e563b5e8b26335e23d6f8cb7d4675399dc891dd19a",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_realm_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd2ffd0fbe413e17ef575a432a2ce8d251d3d634f5dcaaa0b70dfd20d2ba22b1",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad5b8b8c78cf44c6309e19858709eea202cb2a8f20f27e85fc3ea9260bd1b80a",
- "format": 1
- },
- {
- "name": "plugins/modules/keycloak_user_federation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "100992e28832d1fea678013004dbc8400871bba27af2426c2f240b0eaf4da03e",
- "format": 1
- },
- {
- "name": "plugins/modules/kibana_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9ecdf864136ffaeb96c2239570ef3de82852d38cc6d522cb801590c62d4a07a",
- "format": 1
- },
- {
- "name": "plugins/modules/launchd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "287f7a5a7c8d859038ca8c15e7d221a1bce7c56b02942260f135b52229e177b0",
- "format": 1
- },
- {
- "name": "plugins/modules/layman.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0",
- "format": 1
- },
- {
- "name": "plugins/modules/lbu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7471d902ef679d8cc8dbeb52b2f737758d696777c83c36332214a727ab7bf1dc",
- "format": 1
- },
- {
- "name": "plugins/modules/ldap_attrs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "26070ca9bf3bfd37884672ad9335c2a7706298645e84bac4c259bdaab4269f73",
- "format": 1
- },
- {
- "name": "plugins/modules/ldap_entry.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7c1beee28d7661cce71496558a7a72f3afc3450e92bd5da44c5561192bf34853",
- "format": 1
- },
- {
- "name": "plugins/modules/ldap_passwd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac",
- "format": 1
- },
- {
- "name": "plugins/modules/ldap_search.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27ace47cfda1f029f3fd0f87e80d19d4170df442a2da819adaf29c169e86c933",
- "format": 1
- },
- {
- "name": "plugins/modules/librato_annotation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9f41d406bfe62d78ad1a042c78019c6fd4df50632213dd5a2d619a2e2bcc1ba",
- "format": 1
- },
- {
- "name": "plugins/modules/linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "167488e841b7c5636e0c1695d689ae29de74d3dc3d33e6bcb4001fb0a680f8fa",
- "format": 1
- },
- {
- "name": "plugins/modules/linode_v4.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1484d4125d34af5990398d36e479a00da32dd318259f2c686e315503124940c",
- "format": 1
- },
- {
- "name": "plugins/modules/listen_ports_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5966c7c49a2850b1c13757899a6bd5443a30319f0b6f2628077662fd703df5b5",
- "format": 1
- },
- {
- "name": "plugins/modules/lldp.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0bebe90d2f24144019108f71e7dedb4ed60ec93abe5e96fce73196192de34afa",
- "format": 1
- },
- {
- "name": "plugins/modules/locale_gen.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d79413b262062855f9e4d97f7fefebbf5f18504e8d36da6496f20a0626c7b8be",
- "format": 1
- },
- {
- "name": "plugins/modules/logentries.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "39eda48181ea6b93f08876a2f9db6b3c22693d848dbb07d6f6592a8adda50152",
- "format": 1
- },
- {
- "name": "plugins/modules/logentries_msg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84",
- "format": 1
- },
- {
- "name": "plugins/modules/logstash_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d84f5ccd70f2dfdfb0f306ed675920972d332cb07b9d1f7997ee9eb16b6dd0d",
- "format": 1
- },
- {
- "name": "plugins/modules/lvg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a797ac328651f2c55e0e3f4d09629095014390bd99b82971aa1fced50249177f",
- "format": 1
- },
- {
- "name": "plugins/modules/lvol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "faa2fddec92f0bebc7a4536cb716748cadb99d57be46e04faf4f14cb43958e86",
- "format": 1
- },
- {
- "name": "plugins/modules/lxc_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9479e4e14d7c49ddd745eb4ccbafc171fd89db2bad96b711e74dfcb457ca111d",
- "format": 1
- },
- {
- "name": "plugins/modules/lxca_cmms.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74ad7330003cfce91c50347b358bea005a2616da70aff5a757bcdd714a3f86a7",
- "format": 1
- },
- {
- "name": "plugins/modules/lxca_nodes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82e905a3d21b63b40414f3ec63dcbd578743c38cf62865ddbe84a5dabb8ec622",
- "format": 1
- },
- {
- "name": "plugins/modules/lxd_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f9dcc2405aff8a396a200b4a8ad4d9321553631966ddeed9c0fb1aee7f4ca94",
- "format": 1
- },
- {
- "name": "plugins/modules/lxd_profile.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc2d195be2a466ba04309725e6b43fff6933ee7fd979fb7be890bbdd7451d55e",
- "format": 1
- },
- {
- "name": "plugins/modules/macports.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dbd71696e4f6e58f8d67117c301c32ee210e6765f6b4f7a2a966b64cba91cd16",
- "format": 1
- },
- {
- "name": "plugins/modules/mail.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d321469472ef8dbd1a0c0c06b67c4213df7a11d487ae18b8962ab1ce7302d36e",
- "format": 1
- },
- {
- "name": "plugins/modules/make.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b119a10b4ef68686d49cfad00d5c3f4cfec954bce9f86dacbd5011fe2a746b9c",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_alert_profiles.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ddbb9e06f40e750fccf055a42d03a1a80b45bd238d8d4558916c849940b73903",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_alerts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3410230671e4ca67fb49d62280309a70c8e272ed44b063aa133b9e906b5d9f74",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab64599f102c1cbc693aa6a963bfdd0890cbe5c9a556bbb95b4a085bbb354421",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_policies.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "583c115fed4980ab0dd6b7beaf97b8779c5976ed5f212cea213b886f08ea2fbe",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_provider.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f229203632039bdf0e89ee52305065bf2038e8d934a94ae293012da52feda470",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_tags.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ace512b173524ed7af89882fe3912511f1138a58a8ef9f426c56226ce8e120fd",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_tenant.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "99d5ff3a9cc80ba2cb52ac6bcdde27a41e8993d355bae1eea34bf9659e0c7cb0",
- "format": 1
- },
- {
- "name": "plugins/modules/manageiq_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9c425603e1e88919c2d9245030f2f02c3866337aa4e81eb702dd003d45069c0",
- "format": 1
- },
- {
- "name": "plugins/modules/mas.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7346067aa024a97e1fa6c3b2bc55a6eb7469b2eea9c8b69daf179232210248dc",
- "format": 1
- },
- {
- "name": "plugins/modules/matrix.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a",
- "format": 1
- },
- {
- "name": "plugins/modules/mattermost.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4ca0cd2ff4e27e91ffa8542531dd77413443690721b78e468d723e3c85278db",
- "format": 1
- },
- {
- "name": "plugins/modules/maven_artifact.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9093a95b922bf4c93af8d371f23f6ec650bc04cb139cbbb3ade69d50b050d5d6",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_dns_reload.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_memstore_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cfa529765f7db308a617550e52b56d21ab49e45003f27ebaa9771b78392abcc0",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6817c961286632c4ec868845cb3eb62f5095fd7c48a98dad1678071ab08cec28",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_zone_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2",
- "format": 1
- },
- {
- "name": "plugins/modules/memset_zone_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0",
- "format": 1
- },
- {
- "name": "plugins/modules/mksysb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4d453b498fb00531d86635f21b89e9da427d17788a8dffd624a7eef2d64260f",
- "format": 1
- },
- {
- "name": "plugins/modules/modprobe.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d587d82af8364836d095369488fd76b90dea4f4bf068ac96984f50302fc7228",
- "format": 1
- },
- {
- "name": "plugins/modules/monit.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0e631c78c8748e568fbc1624ac2831861087b07f88cac56cd995602aeb3fb89",
- "format": 1
- },
- {
- "name": "plugins/modules/mqtt.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc3caa21d09f3103a4c21cb7719ed69522760f9221b536e79ad9f9cc52470d8a",
- "format": 1
- },
- {
- "name": "plugins/modules/mssql_db.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2",
- "format": 1
- },
- {
- "name": "plugins/modules/mssql_script.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fce6238160aaf08763818017d8bd5a211bf2dd8c478daecaa0584166011d58b6",
- "format": 1
- },
- {
- "name": "plugins/modules/nagios.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e",
- "format": 1
- },
- {
- "name": "plugins/modules/netcup_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "17d6af51c3f484d8415565c30657315387fe7b669e3f7646aa1f5b9ffa444619",
- "format": 1
- },
- {
- "name": "plugins/modules/newrelic_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d",
- "format": 1
- },
- {
- "name": "plugins/modules/nexmo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "944a2d977cdaf55b8c53861b2ac13ba4808e3e49429be8dea75b38ec028d2b18",
- "format": 1
- },
- {
- "name": "plugins/modules/nginx_status_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3be0b85c00ec846e372cd74d28bef34f32211231f6c8cf45803285ff76320d39",
- "format": 1
- },
- {
- "name": "plugins/modules/nictagadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32df37987dc72376f00e17b852b236cb78a6827eddad3459fa8f022eb331494b",
- "format": 1
- },
- {
- "name": "plugins/modules/nmcli.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e585180782651197b35c000a62b28c94f599beea53c963b4b44a4a4733b9e833",
- "format": 1
- },
- {
- "name": "plugins/modules/nomad_job.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f340d59640dbfc609d17914eaae66d0abb75aed40548448b92e88b3070c04064",
- "format": 1
- },
- {
- "name": "plugins/modules/nomad_job_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9fe39694e1781829ce8bd562b30f040127f5e1e2d7a977c82db3202fe0b00352",
- "format": 1
- },
- {
- "name": "plugins/modules/nosh.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b98560dd3abfba1dc2fe078a56a4eb93bdcb24af42ef6ee70c413dc7f1f9df3f",
- "format": 1
- },
- {
- "name": "plugins/modules/npm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ad403903ddfdb432279a0c91640d2bccc6f9ff4fc017f865f144d0cf12c3fa7",
- "format": 1
- },
- {
- "name": "plugins/modules/nsupdate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ff24f4b701c08dd89733f207803d8e05e37b0ea0d40ea00f3c2b406c94eddb7",
- "format": 1
- },
- {
- "name": "plugins/modules/oci_vcn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f40472a5d3fa83672bee22b25f4bb8cd5dc058ffbc68fdd3cac95099e8be9029",
- "format": 1
- },
- {
- "name": "plugins/modules/odbc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1a07ed4cd1edfc030bd2bc888c365b50d44955cb82d55a69564f524c42a6591d",
- "format": 1
- },
- {
- "name": "plugins/modules/office_365_connector_card.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca2802d019e153833f903a044a08c233555cc5e7476446c6df780b23995bd26a",
- "format": 1
- },
- {
- "name": "plugins/modules/ohai.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9",
- "format": 1
- },
- {
- "name": "plugins/modules/omapi_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32824ddf8d839bdad9decf1161bcee7301af665604be924c98b3378e13315e12",
- "format": 1
- },
- {
- "name": "plugins/modules/one_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27fc10fab8637c26999d160cd0a07a6d2785d0884c0ddf6dd64b9167cbe261a2",
- "format": 1
- },
- {
- "name": "plugins/modules/one_image.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc5f0a799258a85e6580bb80e5853fe7f17f64d2baa149eb558994f968e62aeb",
- "format": 1
- },
- {
- "name": "plugins/modules/one_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "230859d81cd0cfd8aa3495a6f19de66dc73995a56cd2a7c44fc975c3de94a24e",
- "format": 1
- },
- {
- "name": "plugins/modules/one_service.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420",
- "format": 1
- },
- {
- "name": "plugins/modules/one_template.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa8c26db525d6ce3ea088ab7f104ffbe900969c5fef2253b11137ec3bfa76c8f",
- "format": 1
- },
- {
- "name": "plugins/modules/one_vm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c2832ad3bd5c28b0c269539286f52c3f0492a52322ca9148335f63b5aac8f4f",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_firewall_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00bef2b89385e4be0273d6054adc6fcaf48909c8ed439860e4623bef5ea9a262",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_load_balancer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c58464049476dc05439d1b53b4cc76c1bc2efe57ef978e96250b227ad6dabf7",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_monitoring_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f43e8dfe07f728583ce6162b1a5981a867bc80ee36577a12c03a330d0c9ede54",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_private_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72d4a6199f1720039795746a96b49e65d755fa00ba4a2a2925abdbfd942927fb",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_public_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1621773c8720995326fce8e1c59c4c81c82b32ce86aa7f254bdbcea05ff29c3",
- "format": 1
- },
- {
- "name": "plugins/modules/oneandone_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c210342197346d900dfdd87c9078de8ced7247b82abd4e0ba56a47046729516",
- "format": 1
- },
- {
- "name": "plugins/modules/onepassword_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0e2a34b5efebec54d9dce104527972c13fce6c7e04ef25220a8073f4d385d35",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_datacenter_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "355d4c6ef338dcf618383018bb1b7a4dff56e8c01f4241a6ddb28b58fa98f4a1",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_enclosure_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba63e68b4e2ce3fbe7cb6e3884ce7f070f6dfdfc4f21ab8f6ccecf32bf4f55db",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_ethernet_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2d4ccac855870076ac2e5852e5aba82722d56d161317910c65f0144c9888bce",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_ethernet_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b9b15514fd1fc3d8f91b83313acddc8dba8063fdc160c015ca0ac326841d3cd6",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_fc_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3669b6c65a3689dae16737839dccbbe509725ae75f52c55c2bcc935decef6ebd",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_fc_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a59e9a708eb32e0bc67eca344d458f20171812bb765f54069e707817d32f3a3",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_fcoe_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6afddbe7fa11896de1506c9fe82f234b36ca9640483f8c9247e698981bed83ed",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_fcoe_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a89dc5f2cdc9e48ab64afda2958b7dfe0de623bd09ece5d90309f96c5c82f02a",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_logical_interconnect_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8ede8042b1abfffb2b7063e081ab962eeddc3462ba9498c5f777ba7b17aeb79",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_logical_interconnect_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2bfeeb09917fa930055ad91ab23dfcc98cbb1c638c83fb2a484326527541c902",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_network_set.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2d0b3c12e770373a5ae9dd4e30e20e9199dd5882cce2ea99b8e132e0d73db4d",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_network_set_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ae6c0631e08a394570f300600d4fc4c667e11a0c8c01b52a00b9b73e6be1824",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_san_manager.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f1b301a7bef55541938d21ee1b2dd59d86c8b4fdc7a7ec29c2b66f30afd0e22",
- "format": 1
- },
- {
- "name": "plugins/modules/oneview_san_manager_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4d0cc20490ea3903961f2ee4ca7c39bae0c3f2935fd71574fa36a62700283a09",
- "format": 1
- },
- {
- "name": "plugins/modules/online_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "660ccee70609be58fdd563f516002d9f966f665367b9033b863572a352e2793f",
- "format": 1
- },
- {
- "name": "plugins/modules/online_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d17d3d216d502dedc3ce76ac76a9037cea71cca92b996125c376581d6c5fc83",
- "format": 1
- },
- {
- "name": "plugins/modules/open_iscsi.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "307fc84c58937372a867cbf944d16e3a0606ea44e6699f5782c49c64f3957eda",
- "format": 1
- },
- {
- "name": "plugins/modules/openbsd_pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9f9274e283af531ea1604d2231d456b443ca118638c24387c285e51af75bb475",
- "format": 1
- },
- {
- "name": "plugins/modules/opendj_backendprop.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e45d6e5a6145f58dec874da17714d239170c25aa3d6b6bed4e7ab5d45aa92e9f",
- "format": 1
- },
- {
- "name": "plugins/modules/openwrt_init.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55836f6f5d1311011d3184178e63629e7b5a5bc28be88818944e5f8ef9ede13b",
- "format": 1
- },
- {
- "name": "plugins/modules/opkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e456e9b6d5a6760dd77954c9c35a50524344c6f381b69a5b1e278a2b51fff048",
- "format": 1
- },
- {
- "name": "plugins/modules/osx_defaults.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91214ca6596b68554a16c909bb3e5d232b74218b55b9207102ed672ed70b14f6",
- "format": 1
- },
- {
- "name": "plugins/modules/ovh_ip_failover.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10becd6c09fb98b6e8ed7838f7669e233249d51f8537aef736257b2a7ab62d69",
- "format": 1
- },
- {
- "name": "plugins/modules/ovh_ip_loadbalancing_backend.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8af6017776a3c0e311a963f790f75705c130f2cfdb3f59e9b090d496d192ae0",
- "format": 1
- },
- {
- "name": "plugins/modules/ovh_monthly_billing.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6",
- "format": 1
- },
- {
- "name": "plugins/modules/pacemaker_cluster.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_device.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73d6fe85e58d6598d3c8f264f530ff774991bd76e0cdb84ec521e2b894ec6411",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_ip_subnet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3866a31a3e100c8615ae771a933061ead29662d1027b48c3584a5b1097f81b2d",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa9a3e9f17818d2a1485e7ada11b23fff4e8b6c375f805f45a7f57681d0c7a6b",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_sshkey.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e16a15dca05b676b606f42d23289dd512a7d465f269af8a60b96839cb19709be",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c86c31c6ea2c93dcc429b090da7bb20f035a1e21b38ed7010b40cde5fff3113",
- "format": 1
- },
- {
- "name": "plugins/modules/packet_volume_attachment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee069291be7657c0fbe528d35cbdf8566a8c558a108ad5c6de8af1e15f3c8175",
- "format": 1
- },
- {
- "name": "plugins/modules/pacman.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0632694bbe9620826447c3841d4581e718395b052c324c821ef261662980d898",
- "format": 1
- },
- {
- "name": "plugins/modules/pacman_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed012d9d887cdf7f21196040f817b2831ee72056f9ce9a9cf52b622547a760c1",
- "format": 1
- },
- {
- "name": "plugins/modules/pagerduty.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cafe39cf6372187f9c3ab1aa1caedbb31e329474f46662be6dab7247c8db3e10",
- "format": 1
- },
- {
- "name": "plugins/modules/pagerduty_alert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c816f9a62a0c0ba8c520986f4918945877a7e214de0693da2b444e3550a79419",
- "format": 1
- },
- {
- "name": "plugins/modules/pagerduty_change.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a",
- "format": 1
- },
- {
- "name": "plugins/modules/pagerduty_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e",
- "format": 1
- },
- {
- "name": "plugins/modules/pam_limits.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87cc82831d55468a2c0d6d86970417652f0b6403b5f9c50ca6bb6d2e5560a294",
- "format": 1
- },
- {
- "name": "plugins/modules/pamd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "968da2701d4dcb58bf11fb374bc3ccbbc3060c57ca3881fdf8f6bff30f9a8ad1",
- "format": 1
- },
- {
- "name": "plugins/modules/parted.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ed692725bcc6a521bfab3f2fadf1933e99cad99896ab3400c8264306e883e46",
- "format": 1
- },
- {
- "name": "plugins/modules/pear.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f80210e950fbe7d6db548f027713aec26864be6c579179f44128815410597bf",
- "format": 1
- },
- {
- "name": "plugins/modules/pids.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc2569182b41b994eba6fe7ff080628813b09e98c7ab70b9c10f236e6f33a01f",
- "format": 1
- },
- {
- "name": "plugins/modules/pingdom.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab",
- "format": 1
- },
- {
- "name": "plugins/modules/pip_package_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1b88e00fa70e9bd96bf141c4d44a7a282b02009c43faff54a4d9d54c69d137ac",
- "format": 1
- },
- {
- "name": "plugins/modules/pipx.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57df11bbbf4ae34e6eb934afc6808286721268d74540379d1ab812fadbac296d",
- "format": 1
- },
- {
- "name": "plugins/modules/pkg5.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729",
- "format": 1
- },
- {
- "name": "plugins/modules/pkg5_publisher.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6",
- "format": 1
- },
- {
- "name": "plugins/modules/pkgin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcb2759ad7a124939de46ccd21103b3a97d5a9dc027530532a9570cd039eb0d8",
- "format": 1
- },
- {
- "name": "plugins/modules/pkgng.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7db8e217bcf87e0eb62e61a650f03a800e323132b8d9c25beaa244f77299510",
- "format": 1
- },
- {
- "name": "plugins/modules/pkgutil.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be59c5c6e33732eee6662cca01a92d47c6391221783a8e13d3f3f6fe81c2116a",
- "format": 1
- },
- {
- "name": "plugins/modules/pmem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87f561ffee94533db91e813e348569aa7f44c076935e43430268f62a5ead5c0d",
- "format": 1
- },
- {
- "name": "plugins/modules/portage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef869657263254c0fe13e4b160bbf16ce1f935b79d1c65c522e528f1faff98c2",
- "format": 1
- },
- {
- "name": "plugins/modules/portinstall.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2",
- "format": 1
- },
- {
- "name": "plugins/modules/pritunl_org.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "200240d97abc57f33f1a19342dac1cc7586a35fedb314cc23770567f5af6a5be",
- "format": 1
- },
- {
- "name": "plugins/modules/pritunl_org_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b8662b1c14487caf1366ef5e99c84e1b5baeb07f1c7d28d23207a1f3d3c46a7",
- "format": 1
- },
- {
- "name": "plugins/modules/pritunl_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa3c63e2d3575ce83371962f14da45413042adcb058eece23edb26b80e4337f5",
- "format": 1
- },
- {
- "name": "plugins/modules/pritunl_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58e441115653a6326381d3d25bfd37d2a73c52624a67c8432a886baf4ed873dc",
- "format": 1
- },
- {
- "name": "plugins/modules/profitbricks.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1035b261ade1c2568e0f93dbb06707388e21429b84cfa7b4493292bdb69cd4e",
- "format": 1
- },
- {
- "name": "plugins/modules/profitbricks_datacenter.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ab1693fea48313a4dc8fb165ae2853158e5709343485d309cbe7961d744bb67",
- "format": 1
- },
- {
- "name": "plugins/modules/profitbricks_nic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ce736c85be70ad04561b01d6e6f51e5385b31da9780ba8eb99b08e9a3c36267",
- "format": 1
- },
- {
- "name": "plugins/modules/profitbricks_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d163b85748524327ba48c5a02130375d76d34e6e298c02f486e4f6ab51762430",
- "format": 1
- },
- {
- "name": "plugins/modules/profitbricks_volume_attachments.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ccdc52719b66398ea0c39c87936dc3e6c4775a9cb0eccafa15ec5b6ecf37a1b",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de0af339bdcdae46787c8155267127026c83906fb5a611d413d48f920f593406",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_domain_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "404732bc66d7699feef8ef40d0b233175dffa595bcbeb0be5d9c5de1be939ffd",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a1aaeaeb18930ce9fef9191606f7eb3f17d4e4bede11430cc0a50a5b8ccca5e",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_kvm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "345ff443e5a2c4b7d3b639801158a3348f5206e6e7fb819443b7ddb9abf8d79b",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_nic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82acf570c96c694cdec1e8c1f54e7175d3d05834f88fd4b8c4400583c61b3dae",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_snap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "05ad941a753ca0cbb753aacc9c689b31ea0738f010021d871f04b73d95e3bccf",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_storage_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dc50691f4b30c6302c87897678574422aec3e1aa21c02725880eca3d6ff1aff",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_tasks_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0d8964c27196fd1859ab45283fa2d5cc71e2190527a6fd5cd8396acfe1f434c",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_template.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23b7eaa4514c3321c755bdeb1b4a234251677c0fd0396ed3262dc92ada19ac0d",
- "format": 1
- },
- {
- "name": "plugins/modules/proxmox_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15179fb8a51fd8634785e7b7a42aab97f83dd4d5d5765eebea3eb31f180286a8",
- "format": 1
- },
- {
- "name": "plugins/modules/pubnub_blocks.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50fcf2e14b3f56378ea705af16211c4251d4a5a5122958cd6682fced6c98dccc",
- "format": 1
- },
- {
- "name": "plugins/modules/pulp_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27a10386274c0e0ce4b1898686fadea5811dfd7ad45b5daed757d360a70ba2e0",
- "format": 1
- },
- {
- "name": "plugins/modules/puppet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5fa5b7c452ca6ff19a0dec8516667e2afc31f5388fc822a92e20d4c144e2a91",
- "format": 1
- },
- {
- "name": "plugins/modules/pushbullet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0763b7e2415a71cd93764b56b5a4f8e07431b19f657cdfe5f59b1e8c63b8ddc4",
- "format": 1
- },
- {
- "name": "plugins/modules/pushover.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219",
- "format": 1
- },
- {
- "name": "plugins/modules/python_requirements_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9fa050aedaedf5dd2693f4443418b780e5efbe06bf332f6b1fd675dec120ac6f",
- "format": 1
- },
- {
- "name": "plugins/modules/rax.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8e3fbbc102737d0fe16362a643d016bbea5db56591c2be2a1c461f2a8b3d4fc9",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_cbs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e047fe633ea45e4dd28af0bf2d8ad2a438565d8b846cd0d49354cdd17842996a",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_cbs_attachments.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2008436cd3bb9269d018c7ca69ffb40a5d21849654d2ce32c77562e548d4dca",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_cdb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "683f0f1f2b266b1ada2b8be24775212eaaf30be12cc8b635485f19bfc6d9de92",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_cdb_database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6fb0e2ff24073a0640ef83786ca3a648b418c8bba2281c6cecaff69903723e3",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_cdb_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57e216faeaf3e709dd5b9a357bc46b177c502ed5faa6e05e41072ebfd7fe3995",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_clb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c48721843ae0d6888b58b6d4565d22a5be02937f60cbe3f42d39d7c376cb8e4",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_clb_nodes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c9a278d819787913d75be28f443ba31c8c2a108bb63c39352d35cbdb600d067",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_clb_ssl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2134871c3e02d0529e9f1ca574f24140c8d6f6abeaf8a6ba96c0105b7541e489",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccbac0266e76a9edf1afcf903675822f0677a3f4d6000d729de7cffc4b54677f",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_dns_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6c8e88e7e4fd6eb2ede8ba0ee9c59083f24204160ae3fddfe6677b036c63491",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6530466f3a66bba556b2ebe9c4b11cf825a50f2aa9cdb9de400030d8f6852bea",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_files.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6bbf1c17d93faa948635783ca33f05597f6f69d81b7c3d24c4c417c428782ba",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_files_objects.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77fa10c9bf0457aef89bbd5c66d373fa59481009907c963dd7e81b7474d76529",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_identity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ec5013cf01d52790b1e04c5f4ccb0c7e8ab5b2e2fe67330fcd55ba1c63e1d4dc",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_keypair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "db1b2855da2f959529c5b377b2b7c7ea8c4a331c4fe507504d57370218b83fa7",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_meta.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7431032026660e219c8878da55c4f92f11caa614f4d08b7623c02ce28bd3b59e",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_mon_alarm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa69ba0c22d53762a053288d543c44a9170f6301904569b894a2ef9065c00af9",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_mon_check.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38726fb34a9b467da5545307324b3086e52a3c149ea371ff82e175986238dc42",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_mon_entity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a5953d1f6359f10d254fcbebcb00d8a635958699ae75291517e7756a226a0e2",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_mon_notification.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6119af771292fd53cb0c145c91d0064e726e232da3f369f31d854208d83b5b3",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_mon_notification_plan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe003304bbf888f7b68ced5e4dc8348a14d1ae27189b042f71a2855ccc1040fd",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d09efe5dc0a5abc207de3126078da30dddfa08fdd6fe5134c95c17b4c6d21597",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_queue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f35abbe7a421095cddd7477be5d6abc598205d1dcaebb9522b39c69cf6e2b7e",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_scaling_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de94c72c31698ef52fab96fa5de59a280fa501c39925048c6a82c6117454da9c",
- "format": 1
- },
- {
- "name": "plugins/modules/rax_scaling_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8dfc1a96c6377f3000222233ffd13ab883bd66ddb706dc3fa1f810a7a4c066a4",
- "format": 1
- },
- {
- "name": "plugins/modules/read_csv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d62a1f5b65ca81a1ba775829d7adc5e175a776de15e544cf85ea321ded35c145",
- "format": 1
- },
- {
- "name": "plugins/modules/redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "423c2bafbce9538603e607934a6c61cb94d96014b901894a750156f2c6f9134c",
- "format": 1
- },
- {
- "name": "plugins/modules/redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b46c6fd282bac3a6a347c25af71a4c9eaab7a54fb019541606824c4ea167e99",
- "format": 1
- },
- {
- "name": "plugins/modules/redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f9aeb09e5827e46c9b6b4420362d7c27d729672322a10637d66164d5341e980",
- "format": 1
- },
- {
- "name": "plugins/modules/redhat_subscription.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69c5a89501f2ec7d9cc4dc7ec38941bbbdaa5548d60121bd8734891f5c210d29",
- "format": 1
- },
- {
- "name": "plugins/modules/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b2d25f0de427359721101190758005d983d3d8f040fcd4a4eeb1453b90e4982b",
- "format": 1
- },
- {
- "name": "plugins/modules/redis_data.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff6b0f5c3b03c74c23565cea5bc0598d1107145ca22ce05c18f077d2c14546b2",
- "format": 1
- },
- {
- "name": "plugins/modules/redis_data_incr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f098910c5bfeb5edebd3a6ce2a9156d51c534dce997697f219d2a8eea297a27d",
- "format": 1
- },
- {
- "name": "plugins/modules/redis_data_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "671fc3bfc6d1b36aa4f2ae686d2d5fc180a1decbd61efe3f03bcada8b29da0a8",
- "format": 1
- },
- {
- "name": "plugins/modules/redis_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50a909e9a9cc3d2b74c3146d45a444f24234dca68e399a18474d8fbdae19d5dd",
- "format": 1
- },
- {
- "name": "plugins/modules/rhevm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca073abab71f54b5ad451eb6ba62e075f54bbc106251a44ae984c16b60e4496e",
- "format": 1
- },
- {
- "name": "plugins/modules/rhn_channel.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6753c4f100c54548d9a34cc55191a1dff35e789e3ad60a476eabcb85d6e3a71f",
- "format": 1
- },
- {
- "name": "plugins/modules/rhn_register.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3fff78a3b4e686e3e760bbf42691db83540ef06b7d88f28b57223a09f581485d",
- "format": 1
- },
- {
- "name": "plugins/modules/rhsm_release.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a20574f661bf3bcd1bdd02688ed4112eb7a2b35689427e70f5e455ddad7ec1d4",
- "format": 1
- },
- {
- "name": "plugins/modules/rhsm_repository.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c868fab9daf9cd10efb1b01f613cdb85848f37596464a67fe777b68a681b47b4",
- "format": 1
- },
- {
- "name": "plugins/modules/riak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4c8bf321e77871edc3c0a5c342707a50e9c2571fca0ab5bfd8197c682a28b80",
- "format": 1
- },
- {
- "name": "plugins/modules/rocketchat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "952dbea3dbfd46a029b9ad19b7a5f3d7659df608a9346f067563fd98f9e8ce65",
- "format": 1
- },
- {
- "name": "plugins/modules/rollbar_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d281b9e5f08730d58c9aac003d90b45151f9819eb871dd900e63ab3d882f5998",
- "format": 1
- },
- {
- "name": "plugins/modules/rpm_ostree_pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e0538d35acc1c91abd3bdfa76310252f9782693e7328722ca04228100cebfb76",
- "format": 1
- },
- {
- "name": "plugins/modules/rundeck_acl_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f5d8165b92c6995925b290f7956385d5f58e67db78fc5999a8d9fce2c8631a4",
- "format": 1
- },
- {
- "name": "plugins/modules/rundeck_job_executions_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70a72bee59a76399bccced7e6db5b5079df984405f5e8f6c03aa077cf0a3954e",
- "format": 1
- },
- {
- "name": "plugins/modules/rundeck_job_run.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11003889632bd0531f924dd291d0e9df1ccad0225e3e252e9dc33a258768c8b1",
- "format": 1
- },
- {
- "name": "plugins/modules/rundeck_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2c34f541040b892e7f031487104db7ec1b0e1a522817e8308d586f9d503f6f8",
- "format": 1
- },
- {
- "name": "plugins/modules/runit.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72f3a3dfab5c5d69e79feb4564374076228b714b842e6606bebdc08317c2d74e",
- "format": 1
- },
- {
- "name": "plugins/modules/sap_task_list_execute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b1fe8a9ff6fd21d93aa37a3bb40f875dfae6d25c2d5aedb6580197f77cb75ead",
- "format": 1
- },
- {
- "name": "plugins/modules/sapcar_extract.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fd7fec63a0695a033b2b637499b1f6ace8dd36bd9656f912632260dbc04ae88d",
- "format": 1
- },
- {
- "name": "plugins/modules/say.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_compute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8f1883b5813315b1c406b285ce00016aa5312559637765b054126e81d818350",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_database_backup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcd9a15702a9b326e06210c5c14b402504efae5c23f86242921fe745d321d2a4",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da6f412ff90955c2ba5ade197e163dc3c36458c036d36d30b0bee1c96e974e43",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96e4fa6eb157e3553fedf94669681c529add87cabe8aeab442c9f7173f4f398f",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_ip_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5abf4b2c16ef564f485d7be74882b003c3934ded53fe9115022808d88bd90db1",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_lb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a99b738a4a7baffa0ab13ab5ed273bb0d4a0c4ee84a9121dbc7def22fdade7b9",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_organization_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e29446afc7823fbf66142c7296c24418538474da1eb6180a4fe3ae6e97f3477",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_private_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f95de713da409754100cd96cee21a007082f2bcc93bcbe5e0cc3b85b0324918",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_security_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "41072a0cee6fb8794ea5214ef95f065b9b1cda8ee36296966c529e867655e27f",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_security_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7fa7d5011ba75d648159310c0c1fc26b573d56c973166591c23044d1e72c5492",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_security_group_rule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "03916e5b092a441d6945ccae89c93968f21083bd551ddb48e9f0b280b9abec7e",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c5955d275d799b7879ce024888d3d62288c3e19f377b42bd8e22c4d366915b7",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_snapshot_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6127b39c7c7f02fdd516efe60384f4d0b47165e4fb6ba81b5f96a7e42f559983",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_sshkey.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8b5023d5077dfba435674012a27ea807d9352709feacc2eed6b1e5f86f8e582",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_user_data.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb81200dec6e35c01ca780a99c933f255f35c5ce84a3f3f9a1fb24547f295cb7",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49173705942bca167ab4caf077dd0ada20360272352443e1a341e624e2d5e77d",
- "format": 1
- },
- {
- "name": "plugins/modules/scaleway_volume_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c85b5e98e0b397f944ec775736381d6244c8a983117449366f58df4dd11c49a5",
- "format": 1
- },
- {
- "name": "plugins/modules/sefcontext.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be1154ed383b3b642dff0e92276c0943ec2e7a5b875e7f16e78ee5764c1d8283",
- "format": 1
- },
- {
- "name": "plugins/modules/selinux_permissive.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52a988c4f8aa70cd2734333b75b7ec5977be80c272badca53a60df50f157458d",
- "format": 1
- },
- {
- "name": "plugins/modules/selogin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7424203ca02499f11893f07191e356ee4bf7a92f8c6c66f3760bb3662756bf38",
- "format": 1
- },
- {
- "name": "plugins/modules/sendgrid.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74a1a142ea29a5519ab4fe938192638ae79b54f40a957dbb7d2b4e3ac4474b87",
- "format": 1
- },
- {
- "name": "plugins/modules/sensu_check.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15aa5b61a60a0c812caf893e14c76f55150fa535edbba58a698fa0b07a95687b",
- "format": 1
- },
- {
- "name": "plugins/modules/sensu_client.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "658c2f425bd755eca7ff3317d9bc4ae20ab2d4650b8659b9846455a4cf650e84",
- "format": 1
- },
- {
- "name": "plugins/modules/sensu_handler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d95a2dcc85c1c9ccb51ef8cd0f6412a841db023dfd3412b47bd8aad17e5608fe",
- "format": 1
- },
- {
- "name": "plugins/modules/sensu_silence.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae9e6d8b69a746cf8e985499ed73c177abb02fdd13bbd04a501a8f76fff96fbc",
- "format": 1
- },
- {
- "name": "plugins/modules/sensu_subscription.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "530a5fd15a37728a1fd346f68300ecc4fcf28904c1cf3663875006514f0db31b",
- "format": 1
- },
- {
- "name": "plugins/modules/seport.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "56ce94a493847ce43ad44e30af4bd87b816feeaa4ce15648828998b34efdb721",
- "format": 1
- },
- {
- "name": "plugins/modules/serverless.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ebbb91033f3ec1b0a3635b74a288f037d5ed6297f167b5bc94cdcfebc5dd81c",
- "format": 1
- },
- {
- "name": "plugins/modules/shutdown.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552",
- "format": 1
- },
- {
- "name": "plugins/modules/sl_vm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b4ec3201ad6d82903722224f35f39c899ee94d96596ada4b112c658d55d8b76",
- "format": 1
- },
- {
- "name": "plugins/modules/slack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e96ef97177e41d87862c20fe5daa14f60230671ba34309b83477fec933c4238c",
- "format": 1
- },
- {
- "name": "plugins/modules/slackpkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "65d52caf009ae8dc698a49d4fef5ac6644954a6c46a68fd961b0e690ddfdc141",
- "format": 1
- },
- {
- "name": "plugins/modules/smartos_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9c4e315cdd50c84ede09c145a86eacb98515d36fc87251ce11759d26de30200",
- "format": 1
- },
- {
- "name": "plugins/modules/snap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "513ff327c2a09f42eaa5a945f0b72fe2e6e17bbdc5491b6875c04eaa8f846b48",
- "format": 1
- },
- {
- "name": "plugins/modules/snap_alias.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b23129de9e88a07cf2c3d5012cb32ec105622e7dfcdfbcdaf694dcdf92cf518b",
- "format": 1
- },
- {
- "name": "plugins/modules/snmp_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "76246be2be66921ccb940983d25eef4bf5b8cb2f2b96b8bb3f9971bda482ee68",
- "format": 1
- },
- {
- "name": "plugins/modules/solaris_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8",
- "format": 1
- },
- {
- "name": "plugins/modules/sorcery.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db",
- "format": 1
- },
- {
- "name": "plugins/modules/spectrum_device.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426",
- "format": 1
- },
- {
- "name": "plugins/modules/spectrum_model_attrs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a67e3c3ee88a04add9cd67e38778c14b56e9dec145c843f4cbafa550fd9851a9",
- "format": 1
- },
- {
- "name": "plugins/modules/spotinst_aws_elastigroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52f9bdf880e006dbdbbe2882289d506e89c50c4d1bad2ffc45706c7fc41eda2",
- "format": 1
- },
- {
- "name": "plugins/modules/ss_3par_cpg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2be10ff6aa61f598720d6ca0a1668a5ec6033680223fa3d3231192f3c12006ef",
- "format": 1
- },
- {
- "name": "plugins/modules/ssh_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1764d656d155306fa1c01f06ae71350613998bab940e036272a702ec2cf7510",
- "format": 1
- },
- {
- "name": "plugins/modules/stackdriver.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2be5529a5b6f3c9366af6e422fafeea193922831655edd3bf7f7d98c440fb506",
- "format": 1
- },
- {
- "name": "plugins/modules/stacki_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63b57ef41bb4ffad7bd5def9d9d592e3bf2aecc1b22dc66a303774f3b6b95ef7",
- "format": 1
- },
- {
- "name": "plugins/modules/statsd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "792e64a34b4d66ef704505a6464ab5d809822c2cf4277662559b3257b023f903",
- "format": 1
- },
- {
- "name": "plugins/modules/statusio_maintenance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f86b31e7026fa92e312f3196ff270441d9fe75a5e67886bcc1b8c9e3e8d12459",
- "format": 1
- },
- {
- "name": "plugins/modules/sudoers.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b125be575e79d2de7d840aef13ddf5ed40623de0f5e5bc74863e5a09610a5ee",
- "format": 1
- },
- {
- "name": "plugins/modules/supervisorctl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a130a0e5a2402d2d964a069ae288d1faff9808d48f8b0f4d4a83a9fa55192ba",
- "format": 1
- },
- {
- "name": "plugins/modules/svc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97cb8133ea514678200f8dd1d4041ce90327486c903143912d7995806c16457a",
- "format": 1
- },
- {
- "name": "plugins/modules/svr4pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e6fdff83fa4d867e28b52c26ab42377cb8b793218b68a4d538c06b923a78cfff",
- "format": 1
- },
- {
- "name": "plugins/modules/swdepot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e",
- "format": 1
- },
- {
- "name": "plugins/modules/swupd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8247ec718e884f51246f84426c2c50ed7a48aac0e7ef97161ce11e3aa62662fd",
- "format": 1
- },
- {
- "name": "plugins/modules/syslogger.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "abcf172195a8f6b74396dd273e2d9926c0c6bbba773f5949f9565b2cd2aaea07",
- "format": 1
- },
- {
- "name": "plugins/modules/syspatch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89c7d7ddd8731028bb3f5ea8426de2b5b8f19c0d2d9a0e6978aa67347be0540e",
- "format": 1
- },
- {
- "name": "plugins/modules/sysrc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd30445b5d09ca12cd4166dd59f204b4be4e0761ac8ddf7dd851a2d5026bcebb",
- "format": 1
- },
- {
- "name": "plugins/modules/sysupgrade.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c9bab43a8cc9cb85528181f72c9a881e6e53a39755461800aded2b3a27216c8",
- "format": 1
- },
- {
- "name": "plugins/modules/taiga_issue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0162389f24357b7981000dc718ef8a794b260ef570753703bfa372d593583",
- "format": 1
- },
- {
- "name": "plugins/modules/telegram.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "166e6d4a5b832d22b024dc9395780a807341ebbb6d5a78726dd40d9f5214fbbb",
- "format": 1
- },
- {
- "name": "plugins/modules/terraform.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "987e496081df3b156adbf5cb65de3e5e4ff9008b04936272b5b63f8d80c65eda",
- "format": 1
- },
- {
- "name": "plugins/modules/timezone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f762436db06c2b4085c9421b3e9a2337d1b65e1fce6663cc55e6d2efbe774668",
- "format": 1
- },
- {
- "name": "plugins/modules/twilio.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc5913224c8de906d7739278662d6efa7055a88ecc24dd2e568a2c33065b0e23",
- "format": 1
- },
- {
- "name": "plugins/modules/typetalk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42",
- "format": 1
- },
- {
- "name": "plugins/modules/udm_dns_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "828fafca6838e91191a827d40961539a3820507d08f82b0cb6dcdaae53d9b9ba",
- "format": 1
- },
- {
- "name": "plugins/modules/udm_dns_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c18407922bc2d77ecfa7e650c0bd7a90eb896fe07e6c9eac191d0e68f63df2e1",
- "format": 1
- },
- {
- "name": "plugins/modules/udm_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ee3d7282c96a82bf24b58260e3fef9a376e9c2d768d0dd7c4e1ec648288fefd",
- "format": 1
- },
- {
- "name": "plugins/modules/udm_share.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b9ed6ebf6e752a6cb101f24248ec3e319f1965287de8a61a638530701a6137e9",
- "format": 1
- },
- {
- "name": "plugins/modules/udm_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54edc8d89620ddde5caed48faea3e0341ab0d4dff6605c512d67767468fa49ff",
- "format": 1
- },
- {
- "name": "plugins/modules/ufw.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0958a3686ca75540353eddd3148a6e4b19ed9b57bac7e6994e949572dd2a1fd",
- "format": 1
- },
- {
- "name": "plugins/modules/uptimerobot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5aa626e5c790d9b21ef75af42ca78551c07e38e3539ce6dcafcd638cfa8d9ff",
- "format": 1
- },
- {
- "name": "plugins/modules/urpmi.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2533a194a0b36cceeb0ec69d8586cfe12e8f4c7bdf13e22dc68c7dc9d1c8ceec",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_aaa_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ad7213f9e7d5c8683f0a608a816f02f935bd3aa514be57a18671290391e7a44",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_aaa_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f7e66c06b83fec400b96810f28ce02f9d7c6c20cec8ebe5e321f163c318d8dd",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_ca_host_key_cert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2c1649b50116c8b150ecdd4ca13c91bc52f49a22a57cd7aaec2d4c6125c0524",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_ca_host_key_cert_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "826a7d96e136504ae975e591e769dd5fdff2c96b59eaff5535dfeb43fbaf08d5",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_dns_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc5c40e788f2cf6dd4e82f618f6f37ea21e3ce497c640c49bfd9ec2ccdf234e0",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_network_interface_address.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29d9fe615e9c8b54a8bdac9ca4c4a0436ae3d3cae2972bae73df9fbb071072e5",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_network_interface_address_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "399fa31a5fc8cfcf1a0f8fd944f7ca139446413e6fff5251083c226bb5274aa7",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_auth_profile.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54ded3e29eec68ce76581b665af3228e58fe76211ffc3a392a890d42eac30289",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_exception.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4dd04942dd16dae3c1e1de10712363b8cc67597db2647fc58d3a085c0a5d6e0b",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_frontend.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d39c2514f334eace3ce91c284d85afbaa6ce488b6dec69d7cea6689247fee56",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_frontend_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da27864c36b0b1636bb1016f6623d38cc2685d9f1073d9023baf6650e2b5fbc5",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_location.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b3f2a4ee29a7fd7a468d7a4feaae37f0ce5d90fc963a91561feae1de5cd21f2",
- "format": 1
- },
- {
- "name": "plugins/modules/utm_proxy_location_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "af35736343e2510d4ff9dc5ca4a01c3a6a17ae83685ea43381b8ae84190f1050",
- "format": 1
- },
- {
- "name": "plugins/modules/vdo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89c6c5018638467973eee8012275abf8a5f611a01cc073bc82ce583e52b3639f",
- "format": 1
- },
- {
- "name": "plugins/modules/vertica_configuration.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff26d24f57fbea1fcf16e64ce0eff1417624bcf5224da566422a6086512a8c19",
- "format": 1
- },
- {
- "name": "plugins/modules/vertica_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f270fb5c6863524079c13320d7816bb446f48b485e5fda83fba3d76183a70a9",
- "format": 1
- },
- {
- "name": "plugins/modules/vertica_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4127075612f26e1b253766e24f5976861a9f3a985cdfc0150c46bccf394f7ba0",
- "format": 1
- },
- {
- "name": "plugins/modules/vertica_schema.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69e66027dd2f802db9e894e4e45ba61e8f7324d0439807f06f1e0766508e371c",
- "format": 1
- },
- {
- "name": "plugins/modules/vertica_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9c75983531caeba4f82346be16d82e759af99ea6ab5e55253b68cce5919e394",
- "format": 1
- },
- {
- "name": "plugins/modules/vexata_eg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d",
- "format": 1
- },
- {
- "name": "plugins/modules/vexata_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a",
- "format": 1
- },
- {
- "name": "plugins/modules/vmadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0971c82f4b903c4b66b249f6c334ca5e8807d8a6e331259df1e6b3a857b3cf79",
- "format": 1
- },
- {
- "name": "plugins/modules/wakeonlan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eaedb6adc85510f03ea6424a673ef862122db281b83f75d3f66668652443fec8",
- "format": 1
- },
- {
- "name": "plugins/modules/webfaction_app.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7255baa5e5a1f4ce32ccf47ba28e520f8216cc456e76ca4e2f58011db66f55e",
- "format": 1
- },
- {
- "name": "plugins/modules/webfaction_db.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "33d9e30418f53beef9333cac841481ec7a12104b9b9dd83827509662e983b36a",
- "format": 1
- },
- {
- "name": "plugins/modules/webfaction_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7b7e7f59ae55508f5fa363376723a14dc3ab3d7823c962eb37f54c7d01381646",
- "format": 1
- },
- {
- "name": "plugins/modules/webfaction_mailbox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7bd28802ef9156cb65184bb6b2c890fe68011c50039032b364e411a7fe778cc",
- "format": 1
- },
- {
- "name": "plugins/modules/webfaction_site.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "251596c3e652ec903f30714b4ed852fbb57ddfeb7a37e11e2189dc2d52a98655",
- "format": 1
- },
- {
- "name": "plugins/modules/xattr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e921b2dae03b00730009593599edb959ad3ff62419caeb3cbeaecdd9be9f2c2",
- "format": 1
- },
- {
- "name": "plugins/modules/xbps.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "089f8b636b6bf7eb741857050bb8f3e105c919e705d561501bb91f9a1301af87",
- "format": 1
- },
- {
- "name": "plugins/modules/xcc_redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "457f980a1ceb9c24d26aa2b7145d26f8902c56a4cbc0ffc7ddaae24670f48741",
- "format": 1
- },
- {
- "name": "plugins/modules/xenserver_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5316dbdb00e13930e0b2e5aa05255e7f7166ccb568fda58e4f5b3ef7e9eb7de",
- "format": 1
- },
- {
- "name": "plugins/modules/xenserver_guest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75fdda2a572d73b2248badab234d9a4c91caf035be8bbf450652fc567aef5c6b",
- "format": 1
- },
- {
- "name": "plugins/modules/xenserver_guest_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55a88716e40077de6218ae32f47d73349dd6993469e02a0c0d867b6638991280",
- "format": 1
- },
- {
- "name": "plugins/modules/xenserver_guest_powerstate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270",
- "format": 1
- },
- {
- "name": "plugins/modules/xfconf.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e6be01aa8dd20b6a1280caa636ea2321e0ce1635a39ca05517689b94716db9c",
- "format": 1
- },
- {
- "name": "plugins/modules/xfconf_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a5da7521b9e492203fa819ac907686227c1184a6ccb327c35a3b5e6b59b9e6e",
- "format": 1
- },
- {
- "name": "plugins/modules/xfs_quota.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27825f948b8481607c8829578da78f5b9030677cdf578304491fc9d6ca4f1348",
- "format": 1
- },
- {
- "name": "plugins/modules/xml.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20c53e6a1125b9d310540e60133de640668297ff31b91842bdd659ab0155f688",
- "format": 1
- },
- {
- "name": "plugins/modules/yarn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d932d7644fb9f5e4a333c1e402b68b485a16e3d14883df4b8f9a1f39442d077d",
- "format": 1
- },
- {
- "name": "plugins/modules/yum_versionlock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9581edf16a8ece5930e0eefd40622ee4e4b453e564d3e40adcdf949ec1257dc",
- "format": 1
- },
- {
- "name": "plugins/modules/zfs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d5c3365e12bd96290f24b1ec13e5161e61f505d07110e03ff58195397373516",
- "format": 1
- },
- {
- "name": "plugins/modules/zfs_delegate_admin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3109f4627ebfb5190204f57294c84ad0d54197c99c3a001b1f69f5291124490f",
- "format": 1
- },
- {
- "name": "plugins/modules/zfs_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "18a3b38a9f47f4f5112579b819de7d664e0b55db8995743d4eac364579af5e2e",
- "format": 1
- },
- {
- "name": "plugins/modules/znode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09e0d13c350448cb46706cc1106c643d1ede2a94fd54eb4c9bf6bb5a6b36839f",
- "format": 1
- },
- {
- "name": "plugins/modules/zpool_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e26beb9afe4a1cbd3b2a05eec94c61ee16b586db9985c962f09c76c15f80883c",
- "format": 1
- },
- {
- "name": "plugins/modules/zypper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4386efd38cb4d2e6b5f6ffd4a4d66265541f6ba78547359833de537095036b1a",
- "format": 1
- },
- {
- "name": "plugins/modules/zypper_repository.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef4e8074966a7a65e9b22d703beee3f2b6b7aa5b22e28123bdc18d5043f8db88",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/alicloud",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/alicloud/ali_instance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6273f052fa89f9ab9a27230eee5064a37333af680e24ba1d5a715ec11e83c980",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/alicloud/ali_instance_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34c5d0b44fc32a43160e9c62290e1afecfe73481f22b9a9ce8b444c4517112de",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/atomic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/atomic/atomic_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13baf1b70fda761f06be5d8de58290518bc8707287af37fe1af641284fb504a5",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/atomic/atomic_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef3911802c6f970e9014cb8fd849be9df1f8e897876fc9cce03cd66e7d3a2e5f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/atomic/atomic_image.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd25dd2258096e58d9d2873a382e9e5f530cd6224d74325c5466a829f9f6c5e2",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_aa_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "767f1e863c93bfe0e8d3bb37d7a029384caec1cf41eebde2c6ce60a864feb5c3",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_alert_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e07b52737a3326a3debf36f5d38fc1fa33503b8fd7156f5f1fb19035a8f379",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_blueprint_package.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52d3398cae86c645575a688a7f9dccccbd60b51d69743fdf2e64be70535c75e8",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_firewall_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef30311f37991878811921a4ece22412e4c94e92527e9d93d2f761efbfca658a",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "636a3b3a90bb1d9fd744e2a22f3ad42a6a372df6ffd9f2aef92e606391ecaee7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_loadbalancer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87e5dace3e225dbd78b375a034bf5b582a4af0ba05b9276b1bf92caa61a8f5d5",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_modify_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "250d51c8692ee01ef2b75c9da4327adeaf79934aae75a942c45807a66ea9de62",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_publicip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b183d61dc5fb36caf1424935c1915fe087322d608bcfc0211a84b56053e0555e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c7b6c85a2f14f4caab7d170ea0204f87428a5116e21eb8dffd4bcee26540111",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/centurylink/clc_server_snapshot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8dd73687f3268d52da21504f88fc735fbf4a0761655db9693486a46b24263a16",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/dimensiondata",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/dimensiondata/dimensiondata_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4adadccb01c1cef01fe7d330d031c733cf61079bf28f82cab9f260d02355eb8a",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b30817b9ad59ecb496117d3f53cae29c288dc7307f0ea100b7a01f73dfeb998e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/heroku",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/heroku/heroku_collaborator.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a540ae7b336b9ceb5b55d841ae1c8aa86b43da70501a51a7eafd576c59a888fe",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_ecs_instance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89845b03caeb5d8bc17443300b889399ae73b4da9df2d1404c1d9c09f042ae8e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_evs_disk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a74a926cd9e503aaebaa3a77d5e80dbba7e42c4c4a92f9c7dbcd147dda363714",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_network_vpc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad8ab2a633dea8a8afe36d610bd108ec2d8455632452935ae7d32b49b9f9cb4d",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_smn_topic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43f61a1ef273853a04a5a24138bd7f4d716d3892ba456b9d38a352d682fc26d8",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_eip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4006ecd981645492fe82a37ea0910a40aac3e24e0e1503a046afa52e42e614a1",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0eca5c552649fd19228928b85cf91670abd2122fd7a6afae49c91f7d84bae03",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_port.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0981c5ad00e6719986102308ac2745eb5d316fd7e0785ebc236102ad9c987ec7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_private_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "778aea0f9e96d24c7c51afdf7eb50bdcda5690d2ca1f10511ead89a47c30a116",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_route.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4369f9a4cfa48a82a66435bf9ebbfcd9a19dd8c91aaf1c5f6684fd33b5c5103e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49f9184ecdc9dcc89addc51cd8490746fb3a54089d403f4fb1c64a6f7516f264",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd75294789234ffe193cfa2ff95084fb3edb0de2a42d9a20309db99bab189997",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/huawei/hwc_vpc_subnet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e5ac97a4be19828a95658766474adba0d1b9c4f2bb2dff454cd4bb3aa821480",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/linode",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/linode/linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "167488e841b7c5636e0c1695d689ae29de74d3dc3d33e6bcb4001fb0a680f8fa",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/linode/linode_v4.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1484d4125d34af5990398d36e479a00da32dd318259f2c686e315503124940c",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/lxc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/lxc/lxc_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9479e4e14d7c49ddd745eb4ccbafc171fd89db2bad96b711e74dfcb457ca111d",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/lxd",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/lxd/lxd_container.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f9dcc2405aff8a396a200b4a8ad4d9321553631966ddeed9c0fb1aee7f4ca94",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/lxd/lxd_profile.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc2d195be2a466ba04309725e6b43fff6933ee7fd979fb7be890bbdd7451d55e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_dns_reload.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b84a25907522e6ce4bb42500d5a17d4d532da3de5a6d640fd4fb33a7adb147a3",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_memstore_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cfa529765f7db308a617550e52b56d21ab49e45003f27ebaa9771b78392abcc0",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6817c961286632c4ec868845cb3eb62f5095fd7c48a98dad1678071ab08cec28",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a5b2527e6602a6e9533c842cf944b71be146787a9ab908eca03de3d97ab6cc0",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_zone_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "90d015499749fd99206a3f5e435b8bb3c59f971689f33024871a2b18125749c2",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/memset/memset_zone_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0db0abd59574ef77493cc31edd1adf8d644740c6968352f94e58a60ea01534a0",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/cloud_init_data_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a45eaa4abec3de3c7d4f0bc9338ed79308b522c2cca5496671da197901688986",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de0af339bdcdae46787c8155267127026c83906fb5a611d413d48f920f593406",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_domain_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "404732bc66d7699feef8ef40d0b233175dffa595bcbeb0be5d9c5de1be939ffd",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a1aaeaeb18930ce9fef9191606f7eb3f17d4e4bede11430cc0a50a5b8ccca5e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_kvm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "345ff443e5a2c4b7d3b639801158a3348f5206e6e7fb819443b7ddb9abf8d79b",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_nic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82acf570c96c694cdec1e8c1f54e7175d3d05834f88fd4b8c4400583c61b3dae",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_snap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "05ad941a753ca0cbb753aacc9c689b31ea0738f010021d871f04b73d95e3bccf",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_storage_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dc50691f4b30c6302c87897678574422aec3e1aa21c02725880eca3d6ff1aff",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_tasks_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0d8964c27196fd1859ab45283fa2d5cc71e2190527a6fd5cd8396acfe1f434c",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_template.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23b7eaa4514c3321c755bdeb1b4a234251677c0fd0396ed3262dc92ada19ac0d",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/proxmox_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15179fb8a51fd8634785e7b7a42aab97f83dd4d5d5765eebea3eb31f180286a8",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/rhevm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca073abab71f54b5ad451eb6ba62e075f54bbc106251a44ae984c16b60e4496e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/serverless.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ebbb91033f3ec1b0a3635b74a288f037d5ed6297f167b5bc94cdcfebc5dd81c",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/terraform.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "987e496081df3b156adbf5cb65de3e5e4ff9008b04936272b5b63f8d80c65eda",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/misc/xenserver_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5316dbdb00e13930e0b2e5aa05255e7f7166ccb568fda58e4f5b3ef7e9eb7de",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_firewall_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00bef2b89385e4be0273d6054adc6fcaf48909c8ed439860e4623bef5ea9a262",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_load_balancer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c58464049476dc05439d1b53b4cc76c1bc2efe57ef978e96250b227ad6dabf7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f43e8dfe07f728583ce6162b1a5981a867bc80ee36577a12c03a330d0c9ede54",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_private_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72d4a6199f1720039795746a96b49e65d755fa00ba4a2a2925abdbfd942927fb",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_public_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1621773c8720995326fce8e1c59c4c81c82b32ce86aa7f254bdbcea05ff29c3",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oneandone/oneandone_server.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c210342197346d900dfdd87c9078de8ced7247b82abd4e0ba56a47046729516",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/online",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/online/online_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "660ccee70609be58fdd563f516002d9f966f665367b9033b863572a352e2793f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/online/online_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d17d3d216d502dedc3ce76ac76a9037cea71cca92b996125c376581d6c5fc83",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27fc10fab8637c26999d160cd0a07a6d2785d0884c0ddf6dd64b9167cbe261a2",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_image.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc5f0a799258a85e6580bb80e5853fe7f17f64d2baa149eb558994f968e62aeb",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "230859d81cd0cfd8aa3495a6f19de66dc73995a56cd2a7c44fc975c3de94a24e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_service.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8800ee2c709981d0fcc213975fa886aa4113b9d7b80846458ddfffd91d75420",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_template.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa8c26db525d6ce3ea088ab7f104ffbe900969c5fef2253b11137ec3bfa76c8f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/opennebula/one_vm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c2832ad3bd5c28b0c269539286f52c3f0492a52322ca9148335f63b5aac8f4f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oracle",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/oracle/oci_vcn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f40472a5d3fa83672bee22b25f4bb8cd5dc058ffbc68fdd3cac95099e8be9029",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/ovh",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/ovh/ovh_ip_failover.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10becd6c09fb98b6e8ed7838f7669e233249d51f8537aef736257b2a7ab62d69",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8af6017776a3c0e311a963f790f75705c130f2cfdb3f59e9b090d496d192ae0",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/ovh/ovh_monthly_billing.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "921ff4d415e12a6ddbefc4a19a2d8807a9d7a3b7328c474fca5be64c59db55e6",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_device.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73d6fe85e58d6598d3c8f264f530ff774991bd76e0cdb84ec521e2b894ec6411",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_ip_subnet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3866a31a3e100c8615ae771a933061ead29662d1027b48c3584a5b1097f81b2d",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa9a3e9f17818d2a1485e7ada11b23fff4e8b6c375f805f45a7f57681d0c7a6b",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_sshkey.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e16a15dca05b676b606f42d23289dd512a7d465f269af8a60b96839cb19709be",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c86c31c6ea2c93dcc429b090da7bb20f035a1e21b38ed7010b40cde5fff3113",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/packet/packet_volume_attachment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee069291be7657c0fbe528d35cbdf8566a8c558a108ad5c6de8af1e15f3c8175",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks/profitbricks.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1035b261ade1c2568e0f93dbb06707388e21429b84cfa7b4493292bdb69cd4e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks/profitbricks_datacenter.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ab1693fea48313a4dc8fb165ae2853158e5709343485d309cbe7961d744bb67",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks/profitbricks_nic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ce736c85be70ad04561b01d6e6f51e5385b31da9780ba8eb99b08e9a3c36267",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks/profitbricks_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d163b85748524327ba48c5a02130375d76d34e6e298c02f486e4f6ab51762430",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ccdc52719b66398ea0c39c87936dc3e6c4775a9cb0eccafa15ec5b6ecf37a1b",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/pubnub",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/pubnub/pubnub_blocks.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50fcf2e14b3f56378ea705af16211c4251d4a5a5122958cd6682fced6c98dccc",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8e3fbbc102737d0fe16362a643d016bbea5db56591c2be2a1c461f2a8b3d4fc9",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_cbs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e047fe633ea45e4dd28af0bf2d8ad2a438565d8b846cd0d49354cdd17842996a",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_cbs_attachments.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2008436cd3bb9269d018c7ca69ffb40a5d21849654d2ce32c77562e548d4dca",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_cdb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "683f0f1f2b266b1ada2b8be24775212eaaf30be12cc8b635485f19bfc6d9de92",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_cdb_database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6fb0e2ff24073a0640ef83786ca3a648b418c8bba2281c6cecaff69903723e3",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_cdb_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57e216faeaf3e709dd5b9a357bc46b177c502ed5faa6e05e41072ebfd7fe3995",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_clb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c48721843ae0d6888b58b6d4565d22a5be02937f60cbe3f42d39d7c376cb8e4",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_clb_nodes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c9a278d819787913d75be28f443ba31c8c2a108bb63c39352d35cbdb600d067",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_clb_ssl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2134871c3e02d0529e9f1ca574f24140c8d6f6abeaf8a6ba96c0105b7541e489",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccbac0266e76a9edf1afcf903675822f0677a3f4d6000d729de7cffc4b54677f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_dns_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6c8e88e7e4fd6eb2ede8ba0ee9c59083f24204160ae3fddfe6677b036c63491",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6530466f3a66bba556b2ebe9c4b11cf825a50f2aa9cdb9de400030d8f6852bea",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_files.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6bbf1c17d93faa948635783ca33f05597f6f69d81b7c3d24c4c417c428782ba",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_files_objects.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77fa10c9bf0457aef89bbd5c66d373fa59481009907c963dd7e81b7474d76529",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_identity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ec5013cf01d52790b1e04c5f4ccb0c7e8ab5b2e2fe67330fcd55ba1c63e1d4dc",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_keypair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "db1b2855da2f959529c5b377b2b7c7ea8c4a331c4fe507504d57370218b83fa7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_meta.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7431032026660e219c8878da55c4f92f11caa614f4d08b7623c02ce28bd3b59e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_mon_alarm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa69ba0c22d53762a053288d543c44a9170f6301904569b894a2ef9065c00af9",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_mon_check.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38726fb34a9b467da5545307324b3086e52a3c149ea371ff82e175986238dc42",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_mon_entity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a5953d1f6359f10d254fcbebcb00d8a635958699ae75291517e7756a226a0e2",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_mon_notification.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6119af771292fd53cb0c145c91d0064e726e232da3f369f31d854208d83b5b3",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_mon_notification_plan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe003304bbf888f7b68ced5e4dc8348a14d1ae27189b042f71a2855ccc1040fd",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d09efe5dc0a5abc207de3126078da30dddfa08fdd6fe5134c95c17b4c6d21597",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_queue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f35abbe7a421095cddd7477be5d6abc598205d1dcaebb9522b39c69cf6e2b7e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_scaling_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de94c72c31698ef52fab96fa5de59a280fa501c39925048c6a82c6117454da9c",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/rackspace/rax_scaling_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8dfc1a96c6377f3000222233ffd13ab883bd66ddb706dc3fa1f810a7a4c066a4",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_compute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8f1883b5813315b1c406b285ce00016aa5312559637765b054126e81d818350",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_database_backup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcd9a15702a9b326e06210c5c14b402504efae5c23f86242921fe745d321d2a4",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da6f412ff90955c2ba5ade197e163dc3c36458c036d36d30b0bee1c96e974e43",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_ip.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96e4fa6eb157e3553fedf94669681c529add87cabe8aeab442c9f7173f4f398f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_ip_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5abf4b2c16ef564f485d7be74882b003c3934ded53fe9115022808d88bd90db1",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_lb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a99b738a4a7baffa0ab13ab5ed273bb0d4a0c4ee84a9121dbc7def22fdade7b9",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_organization_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e29446afc7823fbf66142c7296c24418538474da1eb6180a4fe3ae6e97f3477",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_private_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f95de713da409754100cd96cee21a007082f2bcc93bcbe5e0cc3b85b0324918",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_security_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "41072a0cee6fb8794ea5214ef95f065b9b1cda8ee36296966c529e867655e27f",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_security_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7fa7d5011ba75d648159310c0c1fc26b573d56c973166591c23044d1e72c5492",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_security_group_rule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "03916e5b092a441d6945ccae89c93968f21083bd551ddb48e9f0b280b9abec7e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_server_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c5955d275d799b7879ce024888d3d62288c3e19f377b42bd8e22c4d366915b7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_snapshot_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6127b39c7c7f02fdd516efe60384f4d0b47165e4fb6ba81b5f96a7e42f559983",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_sshkey.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8b5023d5077dfba435674012a27ea807d9352709feacc2eed6b1e5f86f8e582",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_user_data.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb81200dec6e35c01ca780a99c933f255f35c5ce84a3f3f9a1fb24547f295cb7",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49173705942bca167ab4caf077dd0ada20360272352443e1a341e624e2d5e77d",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/scaleway/scaleway_volume_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c85b5e98e0b397f944ec775736381d6244c8a983117449366f58df4dd11c49a5",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/smartos",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/smartos/imgadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7bfa8f3eb4edeb4f1f9e51a4a2c5f17a4390513ff3f2375dc78ab27e5352208",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/smartos/nictagadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32df37987dc72376f00e17b852b236cb78a6827eddad3459fa8f022eb331494b",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/smartos/smartos_image_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9c4e315cdd50c84ede09c145a86eacb98515d36fc87251ce11759d26de30200",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/smartos/vmadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0971c82f4b903c4b66b249f6c334ca5e8807d8a6e331259df1e6b3a857b3cf79",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/softlayer",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/softlayer/sl_vm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b4ec3201ad6d82903722224f35f39c899ee94d96596ada4b112c658d55d8b76",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/spotinst",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52f9bdf880e006dbdbbe2882289d506e89c50c4d1bad2ffc45706c7fc41eda2",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention/udm_dns_record.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "828fafca6838e91191a827d40961539a3820507d08f82b0cb6dcdaae53d9b9ba",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention/udm_dns_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c18407922bc2d77ecfa7e650c0bd7a90eb896fe07e6c9eac191d0e68f63df2e1",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention/udm_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ee3d7282c96a82bf24b58260e3fef9a376e9c2d768d0dd7c4e1ec648288fefd",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention/udm_share.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b9ed6ebf6e752a6cb101f24248ec3e319f1965287de8a61a638530701a6137e9",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/univention/udm_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54edc8d89620ddde5caed48faea3e0341ab0d4dff6605c512d67767468fa49ff",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction/webfaction_app.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7255baa5e5a1f4ce32ccf47ba28e520f8216cc456e76ca4e2f58011db66f55e",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction/webfaction_db.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "33d9e30418f53beef9333cac841481ec7a12104b9b9dd83827509662e983b36a",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction/webfaction_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7b7e7f59ae55508f5fa363376723a14dc3ab3d7823c962eb37f54c7d01381646",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction/webfaction_mailbox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7bd28802ef9156cb65184bb6b2c890fe68011c50039032b364e411a7fe778cc",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/webfaction/webfaction_site.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "251596c3e652ec903f30714b4ed852fbb57ddfeb7a37e11e2189dc2d52a98655",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/xenserver",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/xenserver/xenserver_guest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75fdda2a572d73b2248badab234d9a4c91caf035be8bbf450652fc567aef5c6b",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/xenserver/xenserver_guest_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55a88716e40077de6218ae32f47d73349dd6993469e02a0c0d867b6638991280",
- "format": 1
- },
- {
- "name": "plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "512b73487169482e8887c48e6f3278950736c93a5c2a4c698b149e80217bf270",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/consul",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/consul/consul.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4118f4c040b4c3255e9b585aef388871098bb6da386ef3dfb6eff2a62621b7d7",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/consul/consul_acl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6f145e052de83a3d5fcdb12fcc783b7c14b42be19bee84b021e28bdd5e4d2b6",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/consul/consul_kv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "470aac4466c9a747514dcc73b3c50cbab8649050de192563f35d0054820d60ae",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/consul/consul_session.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc7f0c964b98a2bd770173babef63981ba77fdba3581f31d844caa7aaf2fe723",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/nomad",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/nomad/nomad_job.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f340d59640dbfc609d17914eaae66d0abb75aed40548448b92e88b3070c04064",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/nomad/nomad_job_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9fe39694e1781829ce8bd562b30f040127f5e1e2d7a977c82db3202fe0b00352",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/etcd3.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eca366113dd69573ccb5c95250ceedfbbec34523cc23ddb2406e3ee9bab01e75",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/pacemaker_cluster.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4386ec559c0dd166cb6b6bf4b2f43f3368c2da231653b3f4027d64fb921b1e48",
- "format": 1
- },
- {
- "name": "plugins/modules/clustering/znode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09e0d13c350448cb46706cc1106c643d1ede2a94fd54eb4c9bf6bb5a6b36839f",
- "format": 1
- },
- {
- "name": "plugins/modules/database",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/aerospike",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/aerospike/aerospike_migrations.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52c1641f7f943c727a0d6b8eab2b292b010d9347f28396adc4e8c75159dbb08f",
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb/influxdb_database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f7f03aa049ab52e4dbfb809c86a65d026f518047de475693616d52a611090cc",
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb/influxdb_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a8b781c48ea54c78d2a8ac358ccb5f901746e79b0d0da842b5d06068ce6b1c8",
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb/influxdb_retention_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00fba18126835c5c2e9e79ad1a3e0fea04613c9718839ce304bd5fe48a0450de",
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb/influxdb_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6be29679e39cd622bb5eeaec56a6d802992a2e76a66a1058d478fa72ecef3db2",
- "format": 1
- },
- {
- "name": "plugins/modules/database/influxdb/influxdb_write.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5e2d773ee043f148680048a538b3a61d529ea7628b431149ca7f8c51057dbf6",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/elasticsearch_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09a6283b244e18cdd17f34bcbf8dcfea1c85c7aeba635e033e4b1d7475f4d484",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/kibana_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9ecdf864136ffaeb96c2239570ef3de82852d38cc6d522cb801590c62d4a07a",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/odbc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1a07ed4cd1edfc030bd2bc888c365b50d44955cb82d55a69564f524c42a6591d",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b2d25f0de427359721101190758005d983d3d8f040fcd4a4eeb1453b90e4982b",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/redis_data.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff6b0f5c3b03c74c23565cea5bc0598d1107145ca22ce05c18f077d2c14546b2",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/redis_data_incr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f098910c5bfeb5edebd3a6ce2a9156d51c534dce997697f219d2a8eea297a27d",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/redis_data_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "671fc3bfc6d1b36aa4f2ae686d2d5fc180a1decbd61efe3f03bcada8b29da0a8",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/redis_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50a909e9a9cc3d2b74c3146d45a444f24234dca68e399a18474d8fbdae19d5dd",
- "format": 1
- },
- {
- "name": "plugins/modules/database/misc/riak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4c8bf321e77871edc3c0a5c342707a50e9c2571fca0ab5bfd8197c682a28b80",
- "format": 1
- },
- {
- "name": "plugins/modules/database/mssql",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/mssql/mssql_db.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10836be6d1f0c2d46a5ad956f66a98f0ee983de1660c462d3220d377a14ce6c2",
- "format": 1
- },
- {
- "name": "plugins/modules/database/mssql/mssql_script.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fce6238160aaf08763818017d8bd5a211bf2dd8c478daecaa0584166011d58b6",
- "format": 1
- },
- {
- "name": "plugins/modules/database/saphana",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/saphana/hana_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0503130e11a7444e652e67b08fce9b7ae64fe7e14b201857822558538274387",
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica/vertica_configuration.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff26d24f57fbea1fcf16e64ce0eff1417624bcf5224da566422a6086512a8c19",
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica/vertica_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f270fb5c6863524079c13320d7816bb446f48b485e5fda83fba3d76183a70a9",
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica/vertica_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4127075612f26e1b253766e24f5976861a9f3a985cdfc0150c46bccf394f7ba0",
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica/vertica_schema.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69e66027dd2f802db9e894e4e45ba61e8f7324d0439807f06f1e0766508e371c",
- "format": 1
- },
- {
- "name": "plugins/modules/database/vertica/vertica_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9c75983531caeba4f82346be16d82e759af99ea6ab5e55253b68cce5919e394",
- "format": 1
- },
- {
- "name": "plugins/modules/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/files/archive.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a0715d0aae4143b1f42dc73f560afbfa85782c37ef1645840e27400da7534d3",
- "format": 1
- },
- {
- "name": "plugins/modules/files/filesize.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "181ff76460418648e0b4dd3906d3d7699eb7ebe08eb2b532aa57a295ac06237d",
- "format": 1
- },
- {
- "name": "plugins/modules/files/ini_file.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca49a57202bf72b8b079bbbcf5cfd3e33e530e549bd1ca1626f328a11b8b2839",
- "format": 1
- },
- {
- "name": "plugins/modules/files/iso_create.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e22d344094cca4e10a77f281172b99e2ff51c71d16f63db2088d4cb5cca1dcc0",
- "format": 1
- },
- {
- "name": "plugins/modules/files/iso_extract.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e148bea9a28b93070734fe860f594c56b645deecd5799fcea67e8ac6c8d0e2",
- "format": 1
- },
- {
- "name": "plugins/modules/files/read_csv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d62a1f5b65ca81a1ba775829d7adc5e175a776de15e544cf85ea321ded35c145",
- "format": 1
- },
- {
- "name": "plugins/modules/files/sapcar_extract.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fd7fec63a0695a033b2b637499b1f6ace8dd36bd9656f912632260dbc04ae88d",
- "format": 1
- },
- {
- "name": "plugins/modules/files/xattr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e921b2dae03b00730009593599edb959ad3ff62419caeb3cbeaecdd9be9f2c2",
- "format": 1
- },
- {
- "name": "plugins/modules/files/xml.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20c53e6a1125b9d310540e60133de640668297ff31b91842bdd659ab0155f688",
- "format": 1
- },
- {
- "name": "plugins/modules/identity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8338f390c1e9ac774c095ada6731502c1280e30b01bef293a6651ad54d0bfe8b",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_dnsrecord.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "88fd68dcfd0725e575ce7fac94cb8eb9c74024e83bb0eb5dddec34d568725ebd",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_dnszone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9944ce41cae935b07410a1a482d2d4cd1c6f07f7060a360e6888e67992075a36",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70c065752e9e80713862f8fb3fb85f60219ac80d97a49139288bf6dd335ad168",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_hbacrule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8350663990ec7b9b46879f317760e64e9eb9ad080170f8a3ab66f26022623cd5",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1672d2a59433c0c823dde1d227c7d78caaf492f981d55c6333ba950ba298907c",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_hostgroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae6569872367a3b15727facea24ff4322cdf35512b1dcd8c4889997943eeb1d8",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_otpconfig.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcd17661ce19b040683bbecd506bdb2ec5ed2909c20d71c0a814bb4f05fee345",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_otptoken.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffaa1a58c973d8794d9a1797bd75bccbae783699e1ea87d4bbb7b3ed434d72d4",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_pwpolicy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91f450bc4c6329e67cdf920e7f8499ffb7d27975b0a548ae2110354ed5e2e281",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24e469a9d45178e0fbdfb4635f525640cd1033ec559f45978e4ba7cc42fb95c6",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_service.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3177e872cdf023c8a7e8bd65bd09e2ac102b2c3565c40ee5dc9d8c0fd8ddfcd6",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_subca.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "932c8bd910f72a6fd20831704f96358bfd3b96e94ff8346a09a5c401a27087b8",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_sudocmd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58d95fc267fc9d319ff05df6aaab1fb39df187d48bed52d497d92a30c54750ff",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_sudocmdgroup.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8fbc39a66b0356ec18f8468789e6d4ffb5a1fae4f0e6d68e8837821d2c138f9",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_sudorule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15ee194ba2afa0982721aed91fdc69f93aee33b45af426efea615e3a03016f51",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97c135b60e1aca5fc78d7af59cbf5f5dbe14b0ccd93951bc10450698596c1aee",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/ipa/ipa_vault.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2ee238e7dab861eec17312d74cd513b493ec69b41e0d225501c8668d61837d2",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_authentication.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c90b1d14c16a6a61e114fcf81cecc8a37c0205d45328b3a2d37e4c26f89bbd1",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_client.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6afcc0997e09859e999b6988fc8313c2b6ab6881593c32202caffb9a00d4e8d9",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_client_rolemapping.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "591f181bff4630f8102b105189ff5b3a13de126520d1d28def344d175527979b",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_clientscope.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5afc1453d8f5360849ee0c3290c0c838f0aada90e1812928e77a1b1e7a5ffd18",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_clienttemplate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c950ef71abd6035f3861bc568f993b414bf1a24e163c7f486ae529ac5a92cb24",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49d81d24c71674584f1a762d4db1f73d7a13ba78fc367f3961e6e2cafe0c5329",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_identity_provider.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2d458b33b61e2972f529be3fc2b9818bc0bb9511fd2ad1833b8d0ee11032261e",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_realm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ddd98908cb2d26b7a3627e563b5e8b26335e23d6f8cb7d4675399dc891dd19a",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_realm_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd2ffd0fbe413e17ef575a432a2ce8d251d3d634f5dcaaa0b70dfd20d2ba22b1",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad5b8b8c78cf44c6309e19858709eea202cb2a8f20f27e85fc3ea9260bd1b80a",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/keycloak/keycloak_user_federation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "100992e28832d1fea678013004dbc8400871bba27af2426c2f240b0eaf4da03e",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/opendj",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/identity/opendj/opendj_backendprop.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e45d6e5a6145f58dec874da17714d239170c25aa3d6b6bed4e7ab5d45aa92e9f",
- "format": 1
- },
- {
- "name": "plugins/modules/identity/onepassword_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0e2a34b5efebec54d9dce104527972c13fce6c7e04ef25220a8073f4d385d35",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/datadog",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/datadog/datadog_downtime.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4671fae964f84c50e802b97fc64b2fa39173f787741887a6772d6a300184b69",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/datadog/datadog_event.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "203ee66689572ae405f692c6a34b24d12da75ef835feaf512ee25f179e204077",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/datadog/datadog_monitor.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c1c03834a375f842171002ac31ef4204c4830eb41283263b954704e23353d66",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu/sensu_check.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15aa5b61a60a0c812caf893e14c76f55150fa535edbba58a698fa0b07a95687b",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu/sensu_client.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "658c2f425bd755eca7ff3317d9bc4ae20ab2d4650b8659b9846455a4cf650e84",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu/sensu_handler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d95a2dcc85c1c9ccb51ef8cd0f6412a841db023dfd3412b47bd8aad17e5608fe",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu/sensu_silence.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae9e6d8b69a746cf8e985499ed73c177abb02fdd13bbd04a501a8f76fff96fbc",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/sensu/sensu_subscription.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "530a5fd15a37728a1fd346f68300ecc4fcf28904c1cf3663875006514f0db31b",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/airbrake_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6aa925fba8833cbaa4a23775684646db31a7f1410c4688392ced89db20bbcade",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/bigpanda.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcc88a1f79d5f53d3fe5e69d911a01177f063a9aa52428c22b4564d306f35ec4",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/circonus_annotation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57172616325c7ece221ed3f154e59473f1bfe52c802dcaf0fe0f870133f185b8",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/honeybadger_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "291189d8cb646f5837e39daceeebfd8e54b4f806430deea58c4d54eef50ab709",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/icinga2_feature.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "770edfacd0187f36c9bc94fc88df9fbe51dc29ae1dab5065dbcbd0b0043a089d",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/icinga2_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46b696ade815c4a19e928de8ca0ecdcfe20754bf55cd1f5ace8554daaded778c",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/librato_annotation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9f41d406bfe62d78ad1a042c78019c6fd4df50632213dd5a2d619a2e2bcc1ba",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/logentries.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "39eda48181ea6b93f08876a2f9db6b3c22693d848dbb07d6f6592a8adda50152",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/logstash_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d84f5ccd70f2dfdfb0f306ed675920972d332cb07b9d1f7997ee9eb16b6dd0d",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/monit.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0e631c78c8748e568fbc1624ac2831861087b07f88cac56cd995602aeb3fb89",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/nagios.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f3d329e518de7d3efb7cc6b8d96dd17f420a22134f61012b605e579dd365a7e",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/newrelic_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5bab501cf9754d7a6c46ae2977fec718592d45efae4d4cd5a29652e6f76bf33d",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/pagerduty.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cafe39cf6372187f9c3ab1aa1caedbb31e329474f46662be6dab7247c8db3e10",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/pagerduty_alert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c816f9a62a0c0ba8c520986f4918945877a7e214de0693da2b444e3550a79419",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/pagerduty_change.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f8b9d10f9edd7c2a7c896a660f920faa975d680ed799eb738ec7277205e748a",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/pagerduty_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "afe68c52a1fee0a441f79308f3e3f8fb296d9e5193bf74cb10b7a611e2a90c5e",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/pingdom.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19b5785687a9151584a01ce49b9321d1cb4f4fb9a105e8c53a6e10654b1a38ab",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/rollbar_deployment.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d281b9e5f08730d58c9aac003d90b45151f9819eb871dd900e63ab3d882f5998",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/spectrum_device.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "883564d265cd219779f52beb177c1eee686445277aec016a0000a9734bb3f426",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/spectrum_model_attrs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a67e3c3ee88a04add9cd67e38778c14b56e9dec145c843f4cbafa550fd9851a9",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/stackdriver.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2be5529a5b6f3c9366af6e422fafeea193922831655edd3bf7f7d98c440fb506",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/statsd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "792e64a34b4d66ef704505a6464ab5d809822c2cf4277662559b3257b023f903",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/statusio_maintenance.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f86b31e7026fa92e312f3196ff270441d9fe75a5e67886bcc1b8c9e3e8d12459",
- "format": 1
- },
- {
- "name": "plugins/modules/monitoring/uptimerobot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5aa626e5c790d9b21ef75af42ca78551c07e38e3539ce6dcafcd638cfa8d9ff",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/infinity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/infinity/infinity.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "639c7ed7633b97041cd61f657ec7d60d28db516cab49fac6c0cfec5a01c013de",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ldap",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ldap/ldap_attrs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "26070ca9bf3bfd37884672ad9335c2a7706298645e84bac4c259bdaab4269f73",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ldap/ldap_entry.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7c1beee28d7661cce71496558a7a72f3afc3450e92bd5da44c5561192bf34853",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ldap/ldap_passwd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ba81db2b15e61479f3621ea0f9c1ee360a6938388349c842ee7cc39d4affaac",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ldap/ldap_search.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27ace47cfda1f029f3fd0f87e80d19d4170df442a2da819adaf29c169e86c933",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/pritunl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/pritunl/pritunl_org.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "200240d97abc57f33f1a19342dac1cc7586a35fedb314cc23770567f5af6a5be",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/pritunl/pritunl_org_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b8662b1c14487caf1366ef5e99c84e1b5baeb07f1c7d28d23207a1f3d3c46a7",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/pritunl/pritunl_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa3c63e2d3575ce83371962f14da45413042adcb058eece23edb26b80e4337f5",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/pritunl/pritunl_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58e441115653a6326381d3d25bfd37d2a73c52624a67c8432a886baf4ed873dc",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/cloudflare_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92ca2752e2212e77e6cc3a089a6a72f2a20983ebed40c8edf0e1ceaf18ace10a",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/dnsimple.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0dbb97d863fd4a2fff967c39ea1ea12c18f525db25090b6de23239a7ee1e859e",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/dnsimple_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd640688d78766e01ab5ff644b82807ee3af3114a8195a482a7f8a6773a32d64",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/dnsmadeeasy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4e6ee3395aa9b100b5f9e0e66bb721bcf9688822833ca3f821d977027961c66",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/gandi_livedns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93cbd36bb0cb57ab866445984eec096389e81449ede51e141b22284eada70326",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/haproxy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e406159197e286963c9b16223af8602f7347cb22dc6f02345512b8ab2e1ddc38",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ip_netns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7703c45b7a46aea0d992130cafc0922dc74d926266b8f908adc15c6eef1cfa29",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ipify_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3cfe7e782b99e108e034ad45b38f3a686bd057c13a405e13b4082c9d4655ba8",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ipinfoio_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffefdf9402a767ea1aa17675b8be1d868d68e71ef5292b26ea0266a856914208",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/ipwcli_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27f69f073ce4bd49b82bee81a74f81650a89517936b723a1641f203c281ac406",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/lldp.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0bebe90d2f24144019108f71e7dedb4ed60ec93abe5e96fce73196192de34afa",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/netcup_dns.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "17d6af51c3f484d8415565c30657315387fe7b669e3f7646aa1f5b9ffa444619",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/nmcli.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e585180782651197b35c000a62b28c94f599beea53c963b4b44a4a4733b9e833",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/nsupdate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ff24f4b701c08dd89733f207803d8e05e37b0ea0d40ea00f3c2b406c94eddb7",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/omapi_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32824ddf8d839bdad9decf1161bcee7301af665604be924c98b3378e13315e12",
- "format": 1
- },
- {
- "name": "plugins/modules/net_tools/snmp_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "76246be2be66921ccb940983d25eef4bf5b8cb2f2b96b8bb3f9971bda482ee68",
- "format": 1
- },
- {
- "name": "plugins/modules/notification",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/notification/cisco_spark.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/bearychat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f224a3485783e66fbde1636e5131e561fd1a9006ffe2ec5d24188c07736f5c8",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/campfire.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d22a3da654653ddb964eb55db9164c254860f4430dbe8b505b6945f220294bea",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/catapult.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1bc195bce4b7de9e4e5c612fba7c422e104af61e77d79860c7dfa69b8b0f15e",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/cisco_webex.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b91160d8b53c538dbdeeb45a5584658fcd1a4c57f43ba8a3237a91860a99b02",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/discord.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4526e01b8b1989fa6bd10ad53702eb0115d7e9d213caa2ddca59d86b521af84d",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/flowdock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c50deeb4589cfd2ae9055e2ca708acceaf41f8c4e705a2f3c84bc4d5093bda9e",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/grove.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b743647c9e91e766f9d75ca332fce7f1ee2d53f1a60c25e30aa1da8c54fc42fd",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/hipchat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46ca51483cbd2b779fba4a7a938d4b2e4088eab98423a196588dbf5c83287e90",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/irc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5056a0944304be0cb4585231a68496ecfc2df86c3013ba1b398a17d73ece48c9",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/jabber.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38e25af68e39cb333fe7d46308e6798e9884c5df4feb3d99a9b5c55e8a264709",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/logentries_msg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34982c5c0e9aef4d724a068cc3bbb34df2d7e9757d7d2ed620990124d64b9a84",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/mail.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d321469472ef8dbd1a0c0c06b67c4213df7a11d487ae18b8962ab1ce7302d36e",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/matrix.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49291a2a57c72bea087e2afffade0f7f083deb196f8e32dd6d79955bb5b6116a",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/mattermost.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4ca0cd2ff4e27e91ffa8542531dd77413443690721b78e468d723e3c85278db",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/mqtt.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc3caa21d09f3103a4c21cb7719ed69522760f9221b536e79ad9f9cc52470d8a",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/nexmo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "944a2d977cdaf55b8c53861b2ac13ba4808e3e49429be8dea75b38ec028d2b18",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/office_365_connector_card.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca2802d019e153833f903a044a08c233555cc5e7476446c6df780b23995bd26a",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/pushbullet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0763b7e2415a71cd93764b56b5a4f8e07431b19f657cdfe5f59b1e8c63b8ddc4",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/pushover.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d4b6f7686646e0d44a7ad63811b8c1f69927317c2ce8cea4ff855027355c219",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/rocketchat.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "952dbea3dbfd46a029b9ad19b7a5f3d7659df608a9346f067563fd98f9e8ce65",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/say.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9427eced754de74bbb015098444c4cee334620980bcf62c4c6f7e687475515e6",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/sendgrid.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74a1a142ea29a5519ab4fe938192638ae79b54f40a957dbb7d2b4e3ac4474b87",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/slack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e96ef97177e41d87862c20fe5daa14f60230671ba34309b83477fec933c4238c",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/syslogger.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "abcf172195a8f6b74396dd273e2d9926c0c6bbba773f5949f9565b2cd2aaea07",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/telegram.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "166e6d4a5b832d22b024dc9395780a807341ebbb6d5a78726dd40d9f5214fbbb",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/twilio.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc5913224c8de906d7739278662d6efa7055a88ecc24dd2e568a2c33065b0e23",
- "format": 1
- },
- {
- "name": "plugins/modules/notification/typetalk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b0c5a2c18ec40da946914f93655f144d608fcc4737cca258642c44d69245b42",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/ansible_galaxy_install.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7662c68a2cd0beb854eb1cb47411a4b5bf7004acfa0cd101898aba88c0afd6a",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/bower.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1469648267092280b084c97ff84b89cd29656ae25f5c12b23d6a34d6bd21f214",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/bundler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8afe9744c027374c7bb7fce88ed55069f27cbf040447a5f0f04a04b9053012b",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/cargo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bba289036c8d3d677f768224f9eed512badd2d001089ab783be6f5a8f5e868a5",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/composer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f2740d5b0c235ca97fd503e4441274bc748d4c5b0dcbe3e227831599f573734",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/cpanm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "869b73609aa1f1ba8f2d33ccfed04eec450bcdcf31b710526f2d043aa97c0ea4",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/easy_install.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a11e3e151595b9b729431aa2a4be23edd5d228870b3876cf95160d4552e2ee14",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/gem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2658234014600b059931be2658b92731a7b317a49ad8b87b7a90f4021d2b92af",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/maven_artifact.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9093a95b922bf4c93af8d371f23f6ec650bc04cb139cbbb3ade69d50b050d5d6",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/npm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ad403903ddfdb432279a0c91640d2bccc6f9ff4fc017f865f144d0cf12c3fa7",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/pear.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f80210e950fbe7d6db548f027713aec26864be6c579179f44128815410597bf",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/pip_package_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1b88e00fa70e9bd96bf141c4d44a7a282b02009c43faff54a4d9d54c69d137ac",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/pipx.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57df11bbbf4ae34e6eb934afc6808286721268d74540379d1ab812fadbac296d",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/language/yarn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d932d7644fb9f5e4a333c1e402b68b485a16e3d14883df4b8f9a1f39442d077d",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/apk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "066665788179692795453db9675607e9c400f214f80382fa1646c0a5c4e0b709",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/apt_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a92bdffb40fa2bc8fc8e6954573fccec4a94a8a23884dcee4f680ddec78880e2",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/apt_rpm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e8b8b0d6893fe18ae148144e7ce1e816a07cd760ef60511dcb230c0559b4e433",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/copr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee22d4a8ae70df45b23c47432192ba596568b8ff2ddb225c7c7908b08f316c5d",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/dnf_versionlock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb392c313d8a04369b834a4320c70110311fc1feaef6d58852659dacc682d6d2",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/flatpak.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77856cfeb650ab5930a8af1eacf9b87d3c654c0041c713daf6b3f6fe85c4a9ea",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/flatpak_remote.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0694a7aeb1878ffe91f91625b645d9fb6391dae6e57bff17dd106c83c6e9505a",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/homebrew.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "066bf7706d89a85f64b0cf890adc84f4ec37b23291b883c12c73e5b2b80a5c03",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/homebrew_cask.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2512568adbfbca7a18574b57f68cdf599ea10b5deabab628182ad98c4a71836f",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/homebrew_tap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1d8e1a616a2527b3677f208677e9a1261330777aba1acffa03f093d84f2dc84",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/installp.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1360ed768c621c482767cb1994d96e93827b55a20da4d3f2cbcfbdb5278f9c18",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/layman.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "836e062d867c45bb523e37edfc3cf6b6b9b94700d994f1755d78b706cf3f6bd0",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/macports.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dbd71696e4f6e58f8d67117c301c32ee210e6765f6b4f7a2a966b64cba91cd16",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/mas.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7346067aa024a97e1fa6c3b2bc55a6eb7469b2eea9c8b69daf179232210248dc",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/openbsd_pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9f9274e283af531ea1604d2231d456b443ca118638c24387c285e51af75bb475",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/opkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e456e9b6d5a6760dd77954c9c35a50524344c6f381b69a5b1e278a2b51fff048",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pacman.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0632694bbe9620826447c3841d4581e718395b052c324c821ef261662980d898",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pacman_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed012d9d887cdf7f21196040f817b2831ee72056f9ce9a9cf52b622547a760c1",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pkg5.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e36ce1069607e0608509fc036fb6454af0ede52c3682cb43dea44eedab746729",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pkg5_publisher.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1197f2086a98fe014717bdf3396a4ab17ce600b9867897b9c9a5464b34f626b6",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pkgin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcb2759ad7a124939de46ccd21103b3a97d5a9dc027530532a9570cd039eb0d8",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pkgng.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7db8e217bcf87e0eb62e61a650f03a800e323132b8d9c25beaa244f77299510",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pkgutil.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be59c5c6e33732eee6662cca01a92d47c6391221783a8e13d3f3f6fe81c2116a",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/portage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef869657263254c0fe13e4b160bbf16ce1f935b79d1c65c522e528f1faff98c2",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/portinstall.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f8c255fa388d228c0c2b3e18296ab1f8d9e0ea669241099f8004ec8989b23b2",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/pulp_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27a10386274c0e0ce4b1898686fadea5811dfd7ad45b5daed757d360a70ba2e0",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/redhat_subscription.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69c5a89501f2ec7d9cc4dc7ec38941bbbdaa5548d60121bd8734891f5c210d29",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/rhn_channel.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6753c4f100c54548d9a34cc55191a1dff35e789e3ad60a476eabcb85d6e3a71f",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/rhn_register.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3fff78a3b4e686e3e760bbf42691db83540ef06b7d88f28b57223a09f581485d",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/rhsm_release.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a20574f661bf3bcd1bdd02688ed4112eb7a2b35689427e70f5e455ddad7ec1d4",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/rhsm_repository.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c868fab9daf9cd10efb1b01f613cdb85848f37596464a67fe777b68a681b47b4",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/rpm_ostree_pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e0538d35acc1c91abd3bdfa76310252f9782693e7328722ca04228100cebfb76",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/slackpkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "65d52caf009ae8dc698a49d4fef5ac6644954a6c46a68fd961b0e690ddfdc141",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/snap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "513ff327c2a09f42eaa5a945f0b72fe2e6e17bbdc5491b6875c04eaa8f846b48",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/snap_alias.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b23129de9e88a07cf2c3d5012cb32ec105622e7dfcdfbcdaf694dcdf92cf518b",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/sorcery.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ed8fec8e6c5357a8e0a4d7cf020c253a574f8c239f3371b9604beb90cb0975db",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/svr4pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e6fdff83fa4d867e28b52c26ab42377cb8b793218b68a4d538c06b923a78cfff",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/swdepot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7cf596e285fbcb98e9bae8ee345d63daa2528c34fd93138d6c9afb77db2f7d8e",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/swupd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8247ec718e884f51246f84426c2c50ed7a48aac0e7ef97161ce11e3aa62662fd",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/urpmi.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2533a194a0b36cceeb0ec69d8586cfe12e8f4c7bdf13e22dc68c7dc9d1c8ceec",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/xbps.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "089f8b636b6bf7eb741857050bb8f3e105c919e705d561501bb91f9a1301af87",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/yum_versionlock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9581edf16a8ece5930e0eefd40622ee4e4b453e564d3e40adcdf949ec1257dc",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/zypper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4386efd38cb4d2e6b5f6ffd4a4d66265541f6ba78547359833de537095036b1a",
- "format": 1
- },
- {
- "name": "plugins/modules/packaging/os/zypper_repository.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef4e8074966a7a65e9b22d703beee3f2b6b7aa5b22e28123bdc18d5043f8db88",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/cobbler",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/cobbler/cobbler_sync.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0a69b0d481ff28ea1a5d848fa8b80f9a07a4ccf3a50b3fd384b588d0184a31d1",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/cobbler/cobbler_system.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b4d8ac045e7b8cfadaea593081d4e6bd815492162d6a0a105041563e593827f2",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/hpilo",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/hpilo/hpilo_boot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d0d47b799f9e444207ed5b4667356cee1de57f1d2aeff137aba990ef08beedd",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/hpilo/hpilo_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "293b316839408346f2c2c0123d90b40c8f609e82a12246c202bc3843fc811d80",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/hpilo/hponcfg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc4939e4db789e57dd8b72fa79789b5f5004b98b3a3e4e5ad2a1ab370d6ce274",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/imc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/imc/imc_rest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e51c5d1375a1a9f469cfc28140144116cb29c3bfa35c459708f6ac76895340d0",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/ipmi",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/ipmi/ipmi_boot.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32bc6fd22d5a4705022af7af389209a8db051bd7994c24e233261bc8188234b3",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/ipmi/ipmi_power.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad505007f78f7588bc403a75c522ef4ff75de4b7acfdee4dfbce33aa29713e26",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/lenovoxcc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/lenovoxcc/xcc_redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "457f980a1ceb9c24d26aa2b7145d26f8902c56a4cbc0ffc7ddaae24670f48741",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/lxca",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/lxca/lxca_cmms.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74ad7330003cfce91c50347b358bea005a2616da70aff5a757bcdd714a3f86a7",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/lxca/lxca_nodes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82e905a3d21b63b40414f3ec63dcbd578743c38cf62865ddbe84a5dabb8ec622",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_alert_profiles.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ddbb9e06f40e750fccf055a42d03a1a80b45bd238d8d4558916c849940b73903",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_alerts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3410230671e4ca67fb49d62280309a70c8e272ed44b063aa133b9e906b5d9f74",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab64599f102c1cbc693aa6a963bfdd0890cbe5c9a556bbb95b4a085bbb354421",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_policies.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "583c115fed4980ab0dd6b7beaf97b8779c5976ed5f212cea213b886f08ea2fbe",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_provider.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f229203632039bdf0e89ee52305065bf2038e8d934a94ae293012da52feda470",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_tags.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ace512b173524ed7af89882fe3912511f1138a58a8ef9f426c56226ce8e120fd",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_tenant.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "99d5ff3a9cc80ba2cb52ac6bcdde27a41e8993d355bae1eea34bf9659e0c7cb0",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/manageiq/manageiq_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9c425603e1e88919c2d9245030f2f02c3866337aa4e81eb702dd003d45069c0",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_datacenter_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "355d4c6ef338dcf618383018bb1b7a4dff56e8c01f4241a6ddb28b58fa98f4a1",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_enclosure_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba63e68b4e2ce3fbe7cb6e3884ce7f070f6dfdfc4f21ab8f6ccecf32bf4f55db",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2d4ccac855870076ac2e5852e5aba82722d56d161317910c65f0144c9888bce",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_ethernet_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b9b15514fd1fc3d8f91b83313acddc8dba8063fdc160c015ca0ac326841d3cd6",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_fc_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3669b6c65a3689dae16737839dccbbe509725ae75f52c55c2bcc935decef6ebd",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_fc_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a59e9a708eb32e0bc67eca344d458f20171812bb765f54069e707817d32f3a3",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6afddbe7fa11896de1506c9fe82f234b36ca9640483f8c9247e698981bed83ed",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_fcoe_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a89dc5f2cdc9e48ab64afda2958b7dfe0de623bd09ece5d90309f96c5c82f02a",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8ede8042b1abfffb2b7063e081ab962eeddc3462ba9498c5f777ba7b17aeb79",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_logical_interconnect_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2bfeeb09917fa930055ad91ab23dfcc98cbb1c638c83fb2a484326527541c902",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_network_set.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2d0b3c12e770373a5ae9dd4e30e20e9199dd5882cce2ea99b8e132e0d73db4d",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_network_set_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ae6c0631e08a394570f300600d4fc4c667e11a0c8c01b52a00b9b73e6be1824",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_san_manager.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f1b301a7bef55541938d21ee1b2dd59d86c8b4fdc7a7ec29c2b66f30afd0e22",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/oneview/oneview_san_manager_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4d0cc20490ea3903961f2ee4ca7c39bae0c3f2935fd71574fa36a62700283a09",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/idrac_redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "879b3d5825eb59bc67aea7014006f58df64853f8bff388fbb2b7d0bcb67b71a7",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/idrac_redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24cbee078205ddcf90266adaec93635a38384d7f3ea4db3a8e0adef7e69b05c9",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/idrac_redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "820bb9a147f15fe41bffc5567f699b0f000db2869f2ea268f8e630250d95bd42",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/ilo_redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8546cfb15f05947f7c6760cb5d67928253269aa18102155f600995d3598b739",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/ilo_redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d175b3b05e25ed30302b1ce7994099a19b07709201c864ff37f210aa7df96ac",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "423c2bafbce9538603e607934a6c61cb94d96014b901894a750156f2c6f9134c",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/redfish_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b46c6fd282bac3a6a347c25af71a4c9eaab7a54fb019541606824c4ea167e99",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/redfish/redfish_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f9aeb09e5827e46c9b6b4420362d7c27d729672322a10637d66164d5341e980",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/stacki",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/stacki/stacki_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63b57ef41bb4ffad7bd5def9d9d592e3bf2aecc1b22dc66a303774f3b6b95ef7",
- "format": 1
- },
- {
- "name": "plugins/modules/remote_management/wakeonlan.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eaedb6adc85510f03ea6424a673ef862122db281b83f75d3f66668652443fec8",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bitbucket",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bitbucket/bitbucket_access_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36c0e727d4cf7e57a1ccb7f712ca472f3ed20a8c0b5afa656c9461d39b948ce1",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_key_pair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4b8d0fe0f4ada9e881cc1e76e9365bbac7d35f0650235b9033037482d1e5670",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_known_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd5b27ae648269aab81d3ac46036fc6288781c2a77c02db480ea66ba1bc1445c",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bitbucket/bitbucket_pipeline_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3409614c64334e483f093a3f094fab692d09aaac0db65da0225337e4db2993a0",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_deploy_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d942e6c9a4fc0c0b2ab2b6cfcbb2067b044956b0cc8e3a4eb8908fceeca4308",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_issue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c71ba6cb604c76b2200e68acff20cf55e167b5fbc111aa68a6efd0b6b0573977",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe0c5fe85830fe7c1bfdcf99cdbc14af5366e29b04eeed1cf551092734279801",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_release.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0feb5df29b4556ddae70b101a78da6127312803680504c61739b57b4008037c",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46c5064a6ffa00ff6971115414370a5e49a5dbcef106f18c16a89428e6691fe0",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_webhook.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "781a9ccef57e174ddfba6f794b147aa941b53959652a3fbfb9c38b37d4dec4a1",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/github/github_webhook_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f2d091ba64877de90900c03df4412db8b71393e0d5a742202feda625c05398a",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_branch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "922b6c30c67ddb2acf0d28aaa9ab16dce5b1f6ad270223ec6773ef680e35c746",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_deploy_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43f0d1631cc651c15a935e280f31677805aae6efb6d80b95d21511b8fe4f79ea",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f566f0df7ea3a6d02b4fe0e8550d06400ac926d3d6a24975582c680d3a52528",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_group_members.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10e9d62d1291f8ca28d2dd9d40d67a10028713c53530f516490edfb2187d3644",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_group_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1394fda09fbc289cf2716876d6a5463889abeb5d2ceea2915235dfbf29aa4684",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_hook.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdce5a96cd31d9444b1841eb9ee396683c70ee3eb50634d2f02c38ce07b374f6",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba5e593304a1bb3dce94dab2cc62470a892eb3a039b1e6f99a95869d59c093b",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_project_members.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1a3075b6dd2783cf000979cdff99bf7b4f785802ed9e6e08002f629cc1a8efa9",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_project_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48faf16faee67ab8516ea6b0b7052cc272208325f8c8602c2f013b4384d2eef9",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_protected_branch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "95ed01ee57390473707b05542cd73dfbc4ff729c5be435222d74ec4b16502435",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_runner.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63967e029ff266796082e00ef8263369f5a684b01213308f62d35be1d8c65926",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/gitlab/gitlab_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff0e35d6b34eb457ba640265b41f35bb6fcf335328eb3155f6e3318f12067dd3",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/bzr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "127a4d24fb7ecd0ae8286c7f1eb5332ca2e3217e7ac29ed85c1e814eb7cfeebb",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/git_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4df0f064e3f827b7af32547777bec982cf08b275708cd41bf44533b57cfefcb6",
- "format": 1
- },
- {
- "name": "plugins/modules/source_control/hg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "142f44f349abfc23bfda7f9f2df47d160f2a97446d7d5d31749fd5eab7adab37",
- "format": 1
- },
- {
- "name": "plugins/modules/storage",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/emc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/emc/emc_vnx_sg_member.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdf6c7c0da78522f40ac8678ad94e2088374f137927b412b36c5b538fd257453",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/hpe3par",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/hpe3par/ss_3par_cpg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2be10ff6aa61f598720d6ca0a1668a5ec6033680223fa3d3231192f3c12006ef",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_domain.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "846c2e2161c51130505d8caeef87178eb8cd40b5fe42d9f9c6649b444f0d7c7c",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42574cb0750d740dcbf3dc300cca235b15a22ecb00f79af5aa7818a494b60366",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_host_ports.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc5ac76115dfd50d5b8b37aa9de8c75824e6354a4aa925a171a364dd0fe60fbb",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_pool.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1d51e21c6dc90ebea2e67c86200aa7c28b8451bd09c35cabdd5d53123cc1b35",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_vol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44582854ca8e702de67f555704e9d3b007ece65d723bb24536a567e9e7031757",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/ibm/ibm_sa_vol_map.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7a90662d294fcc853121b02134446a6ae10c430a5caf3ebc0766de0cbba6479a",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/pmem",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/pmem/pmem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87f561ffee94533db91e813e348569aa7f44c076935e43430268f62a5ead5c0d",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/vexata",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/vexata/vexata_eg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fac270b3db28c9f8b6d24d299e753c80f9d251dbbdcb386a319097c17219a80d",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/vexata/vexata_volume.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6377d7306fb5a11f52aaa9a89cff909e8028a7cef71959eb6a7135ba1561d4a",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/zfs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/storage/zfs/zfs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d5c3365e12bd96290f24b1ec13e5161e61f505d07110e03ff58195397373516",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/zfs/zfs_delegate_admin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3109f4627ebfb5190204f57294c84ad0d54197c99c3a001b1f69f5291124490f",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/zfs/zfs_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "18a3b38a9f47f4f5112579b819de7d664e0b55db8995743d4eac364579af5e2e",
- "format": 1
- },
- {
- "name": "plugins/modules/storage/zfs/zpool_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e26beb9afe4a1cbd3b2a05eec94c61ee16b586db9985c962f09c76c15f80883c",
- "format": 1
- },
- {
- "name": "plugins/modules/system",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/system/aix_devices.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "977386dee01ac51d9c885ecee657e0a24df1b5de87996f0a9c9f8c3d0605c08a",
- "format": 1
- },
- {
- "name": "plugins/modules/system/aix_filesystem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "292ff33ccfbcaaf28dc4cd67f6b749dc6b06ae1aa72db436245d348946c19bf7",
- "format": 1
- },
- {
- "name": "plugins/modules/system/aix_inittab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e4b6091b24210a657d58c1767107946ecdf34f90cef0460762144b8cf6d4cd2",
- "format": 1
- },
- {
- "name": "plugins/modules/system/aix_lvg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "633b5243b9ea9b21d80f381a9698f140586e3a39310d21fb83ef8b5aa0d350cb",
- "format": 1
- },
- {
- "name": "plugins/modules/system/aix_lvol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "feb995da59928c227261390532e549999f7a27594f09744529878c91b72e7bea",
- "format": 1
- },
- {
- "name": "plugins/modules/system/alternatives.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "507ab83ed8cc3718318b5de58d67eb743ad0318eab406441eaefd01a5eb18dd1",
- "format": 1
- },
- {
- "name": "plugins/modules/system/awall.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63f6d1714ac308da87c08e54b17fc2205f0bf2426d26914061074317ae835b8c",
- "format": 1
- },
- {
- "name": "plugins/modules/system/beadm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07a418d4d0b40c72721627f7c49bc9f2e6c780247e9f101bfa57c79bf18bbf6f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/capabilities.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d9e46ddf9acbb7caa0bf526654e9b199abf60e253a551d9f10c4e4673fd6713",
- "format": 1
- },
- {
- "name": "plugins/modules/system/cronvar.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "14583a0612a939471168bd5d59e7edac48bb01d024aa0d0fc7cdeffd0e923178",
- "format": 1
- },
- {
- "name": "plugins/modules/system/crypttab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d020cd305a432f0da349b1243d96fba57a3290b456016dbf7480cf6ca3dd9e92",
- "format": 1
- },
- {
- "name": "plugins/modules/system/dconf.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ca342ed1e3cae2da6bc5ee31e05db30f23344f75e4c68a06f577d24ddde2347a",
- "format": 1
- },
- {
- "name": "plugins/modules/system/dpkg_divert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83eb8748719f999e73a1e00bddc2ad0c4fcff0da7d1771feba9e7d1402f260dc",
- "format": 1
- },
- {
- "name": "plugins/modules/system/facter.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9dc303791af31b7355e612dcde7b32ecaa6083514c401a900c1bd6c5da5c616",
- "format": 1
- },
- {
- "name": "plugins/modules/system/filesystem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00db45139f32500f03fdb8b276664e856ee2bbd3e48e225d0bc5d3ab0adaedc1",
- "format": 1
- },
- {
- "name": "plugins/modules/system/gconftool2.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e5a59c14afe686e07a8595a7f102e632ee78d2dc90749bd147e87b8906ef113",
- "format": 1
- },
- {
- "name": "plugins/modules/system/homectl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b770717fcdd6ce98d6b74d1d050fe20ab9278e7a4d2862882afef34ed3938feb",
- "format": 1
- },
- {
- "name": "plugins/modules/system/interfaces_file.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25e134950671398223e77965d70780612354f1f321ef3b196377b8fe734adb03",
- "format": 1
- },
- {
- "name": "plugins/modules/system/iptables_state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06358c739fcc70ba79d43af924c0f35a6920d8c5bc4292c14f96dd5870b8d4f7",
- "format": 1
- },
- {
- "name": "plugins/modules/system/java_cert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c40619fd173dfc758e1dbe6ad2083a924a6b138592fb98244b3d7a152dbbb54",
- "format": 1
- },
- {
- "name": "plugins/modules/system/java_keystore.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f2b9a344962a24cc2754aa948d60b383fbb21dfb7be36fb4cf2582fdfd896cd7",
- "format": 1
- },
- {
- "name": "plugins/modules/system/kernel_blacklist.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22cb952a459ea253cfd9eaf5d6612dabe02cf670385d9a95e0ad8212b8496b1c",
- "format": 1
- },
- {
- "name": "plugins/modules/system/launchd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "287f7a5a7c8d859038ca8c15e7d221a1bce7c56b02942260f135b52229e177b0",
- "format": 1
- },
- {
- "name": "plugins/modules/system/lbu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7471d902ef679d8cc8dbeb52b2f737758d696777c83c36332214a727ab7bf1dc",
- "format": 1
- },
- {
- "name": "plugins/modules/system/listen_ports_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5966c7c49a2850b1c13757899a6bd5443a30319f0b6f2628077662fd703df5b5",
- "format": 1
- },
- {
- "name": "plugins/modules/system/locale_gen.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d79413b262062855f9e4d97f7fefebbf5f18504e8d36da6496f20a0626c7b8be",
- "format": 1
- },
- {
- "name": "plugins/modules/system/lvg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a797ac328651f2c55e0e3f4d09629095014390bd99b82971aa1fced50249177f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/lvol.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "faa2fddec92f0bebc7a4536cb716748cadb99d57be46e04faf4f14cb43958e86",
- "format": 1
- },
- {
- "name": "plugins/modules/system/make.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b119a10b4ef68686d49cfad00d5c3f4cfec954bce9f86dacbd5011fe2a746b9c",
- "format": 1
- },
- {
- "name": "plugins/modules/system/mksysb.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4d453b498fb00531d86635f21b89e9da427d17788a8dffd624a7eef2d64260f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/modprobe.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d587d82af8364836d095369488fd76b90dea4f4bf068ac96984f50302fc7228",
- "format": 1
- },
- {
- "name": "plugins/modules/system/nosh.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b98560dd3abfba1dc2fe078a56a4eb93bdcb24af42ef6ee70c413dc7f1f9df3f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/ohai.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4272be634bd89295c956ff2215715a967d299b5d1173048d0513cb45dc1f5f9",
- "format": 1
- },
- {
- "name": "plugins/modules/system/open_iscsi.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "307fc84c58937372a867cbf944d16e3a0606ea44e6699f5782c49c64f3957eda",
- "format": 1
- },
- {
- "name": "plugins/modules/system/openwrt_init.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55836f6f5d1311011d3184178e63629e7b5a5bc28be88818944e5f8ef9ede13b",
- "format": 1
- },
- {
- "name": "plugins/modules/system/osx_defaults.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91214ca6596b68554a16c909bb3e5d232b74218b55b9207102ed672ed70b14f6",
- "format": 1
- },
- {
- "name": "plugins/modules/system/pam_limits.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87cc82831d55468a2c0d6d86970417652f0b6403b5f9c50ca6bb6d2e5560a294",
- "format": 1
- },
- {
- "name": "plugins/modules/system/pamd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "968da2701d4dcb58bf11fb374bc3ccbbc3060c57ca3881fdf8f6bff30f9a8ad1",
- "format": 1
- },
- {
- "name": "plugins/modules/system/parted.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ed692725bcc6a521bfab3f2fadf1933e99cad99896ab3400c8264306e883e46",
- "format": 1
- },
- {
- "name": "plugins/modules/system/pids.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc2569182b41b994eba6fe7ff080628813b09e98c7ab70b9c10f236e6f33a01f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/puppet.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5fa5b7c452ca6ff19a0dec8516667e2afc31f5388fc822a92e20d4c144e2a91",
- "format": 1
- },
- {
- "name": "plugins/modules/system/python_requirements_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9fa050aedaedf5dd2693f4443418b780e5efbe06bf332f6b1fd675dec120ac6f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/runit.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72f3a3dfab5c5d69e79feb4564374076228b714b842e6606bebdc08317c2d74e",
- "format": 1
- },
- {
- "name": "plugins/modules/system/sap_task_list_execute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b1fe8a9ff6fd21d93aa37a3bb40f875dfae6d25c2d5aedb6580197f77cb75ead",
- "format": 1
- },
- {
- "name": "plugins/modules/system/sefcontext.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be1154ed383b3b642dff0e92276c0943ec2e7a5b875e7f16e78ee5764c1d8283",
- "format": 1
- },
- {
- "name": "plugins/modules/system/selinux_permissive.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52a988c4f8aa70cd2734333b75b7ec5977be80c272badca53a60df50f157458d",
- "format": 1
- },
- {
- "name": "plugins/modules/system/selogin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7424203ca02499f11893f07191e356ee4bf7a92f8c6c66f3760bb3662756bf38",
- "format": 1
- },
- {
- "name": "plugins/modules/system/seport.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "56ce94a493847ce43ad44e30af4bd87b816feeaa4ce15648828998b34efdb721",
- "format": 1
- },
- {
- "name": "plugins/modules/system/shutdown.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02c339648349f7eaa4fc7b64c85ee8c40cfc98cda4c9b97879658efaf889f552",
- "format": 1
- },
- {
- "name": "plugins/modules/system/solaris_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60a77ff20a8d31547321204ecb03e5962a99cb34773e9bb46cf25ecfd0ef52d8",
- "format": 1
- },
- {
- "name": "plugins/modules/system/ssh_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1764d656d155306fa1c01f06ae71350613998bab940e036272a702ec2cf7510",
- "format": 1
- },
- {
- "name": "plugins/modules/system/sudoers.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b125be575e79d2de7d840aef13ddf5ed40623de0f5e5bc74863e5a09610a5ee",
- "format": 1
- },
- {
- "name": "plugins/modules/system/svc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97cb8133ea514678200f8dd1d4041ce90327486c903143912d7995806c16457a",
- "format": 1
- },
- {
- "name": "plugins/modules/system/syspatch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89c7d7ddd8731028bb3f5ea8426de2b5b8f19c0d2d9a0e6978aa67347be0540e",
- "format": 1
- },
- {
- "name": "plugins/modules/system/sysrc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd30445b5d09ca12cd4166dd59f204b4be4e0761ac8ddf7dd851a2d5026bcebb",
- "format": 1
- },
- {
- "name": "plugins/modules/system/sysupgrade.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c9bab43a8cc9cb85528181f72c9a881e6e53a39755461800aded2b3a27216c8",
- "format": 1
- },
- {
- "name": "plugins/modules/system/timezone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f762436db06c2b4085c9421b3e9a2337d1b65e1fce6663cc55e6d2efbe774668",
- "format": 1
- },
- {
- "name": "plugins/modules/system/ufw.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f0958a3686ca75540353eddd3148a6e4b19ed9b57bac7e6994e949572dd2a1fd",
- "format": 1
- },
- {
- "name": "plugins/modules/system/vdo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89c6c5018638467973eee8012275abf8a5f611a01cc073bc82ce583e52b3639f",
- "format": 1
- },
- {
- "name": "plugins/modules/system/xfconf.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e6be01aa8dd20b6a1280caa636ea2321e0ce1635a39ca05517689b94716db9c",
- "format": 1
- },
- {
- "name": "plugins/modules/system/xfconf_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a5da7521b9e492203fa819ac907686227c1184a6ccb327c35a3b5e6b59b9e6e",
- "format": 1
- },
- {
- "name": "plugins/modules/system/xfs_quota.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27825f948b8481607c8829578da78f5b9030677cdf578304491fc9d6ca4f1348",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ad7213f9e7d5c8683f0a608a816f02f935bd3aa514be57a18671290391e7a44",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_aaa_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f7e66c06b83fec400b96810f28ce02f9d7c6c20cec8ebe5e321f163c318d8dd",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2c1649b50116c8b150ecdd4ca13c91bc52f49a22a57cd7aaec2d4c6125c0524",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_ca_host_key_cert_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "826a7d96e136504ae975e591e769dd5fdff2c96b59eaff5535dfeb43fbaf08d5",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_dns_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc5c40e788f2cf6dd4e82f618f6f37ea21e3ce497c640c49bfd9ec2ccdf234e0",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29d9fe615e9c8b54a8bdac9ca4c4a0436ae3d3cae2972bae73df9fbb071072e5",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_network_interface_address_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "399fa31a5fc8cfcf1a0f8fd944f7ca139446413e6fff5251083c226bb5274aa7",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_auth_profile.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54ded3e29eec68ce76581b665af3228e58fe76211ffc3a392a890d42eac30289",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4dd04942dd16dae3c1e1de10712363b8cc67597db2647fc58d3a085c0a5d6e0b",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d39c2514f334eace3ce91c284d85afbaa6ce488b6dec69d7cea6689247fee56",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_frontend_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da27864c36b0b1636bb1016f6623d38cc2685d9f1073d9023baf6650e2b5fbc5",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b3f2a4ee29a7fd7a468d7a4feaae37f0ce5d90fc963a91561feae1de5cd21f2",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/sophos_utm/utm_proxy_location_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "af35736343e2510d4ff9dc5ca4a01c3a6a17ae83685ea43381b8ae84190f1050",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/apache2_mod_proxy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d5fe445448cb9e4605eb0fe5c84e599ae353ecb8a256729b0510392d4fbbc4e",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/apache2_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4dbb4a1e3308a693aaa3101faa828015f66a6a65e040cf3a9a2eee417800d6b0",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/deploy_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d29a73dd509521790e2dcfde24498ea2967bbb5a4c659d26c8a91f41c1cc231c",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/django_manage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be65d011c47d6222a81d1b82af3f9e2cd5853f174c60494cfcc1930009e315ba",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/ejabberd_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92c3d42c1eb1126af9f9bb8c118c0a08f28f599c057a03a254b03e76b370614a",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/gunicorn.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c0fc574bc49deaa348708e90945d2b44c5ec61d22f3919022bdc67c105666cd",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/htpasswd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a9e50c4e8fff4250f074d11041a587ae773629bc33fd8082a1c28c68c99c1b0",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jboss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "413a5203f4d159144142272b5e494f10d032d589d31b0d5167b60ab0e5d40664",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jenkins_build.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a70f3860a8a4adf2ab17cc214be4812d8e72fae7ba2a748fbbbe9bb9755178b",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jenkins_job.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "289f71c98eae7a1138cb3b922f1b7a431d3cf593ef838ff7f152c5ff60839a28",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jenkins_job_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb90242a9999203cb2fa1d6af3e9a8c54ad57530e91aa338f00cee8fd7a4b32e",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jenkins_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9f36ba039a959f4ab537e6736021dbb68c50ed10e7ee3eaad03307c5726155e3",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jenkins_script.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "194b41bc5b511c44e15b770526dcb63625ec530b963e650343467f12b5a083ee",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/jira.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "072dfce83798a6ca7fb0c0395e8d8168ca28b140857ef73687bcfc04ebe00941",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/nginx_status_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3be0b85c00ec846e372cd74d28bef34f32211231f6c8cf45803285ff76320d39",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/rundeck_acl_policy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f5d8165b92c6995925b290f7956385d5f58e67db78fc5999a8d9fce2c8631a4",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/rundeck_job_executions_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70a72bee59a76399bccced7e6db5b5079df984405f5e8f6c03aa077cf0a3954e",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/rundeck_job_run.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11003889632bd0531f924dd291d0e9df1ccad0225e3e252e9dc33a258768c8b1",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/rundeck_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2c34f541040b892e7f031487104db7ec1b0e1a522817e8308d586f9d503f6f8",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/supervisorctl.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a130a0e5a2402d2d964a069ae288d1faff9808d48f8b0f4d4a83a9fa55192ba",
- "format": 1
- },
- {
- "name": "plugins/modules/web_infrastructure/taiga_issue.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0162389f24357b7981000dc718ef8a794b260ef570753703bfa372d593583",
- "format": 1
- },
- {
- "name": "plugins/test",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/test/a_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e1af0bd0e7fb21c5640786d6120056f5dcec24748713cd7b1bf332aef1818b7",
- "format": 1
- },
- {
- "name": "tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_devices",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_devices/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_devices/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5926739106235917ed4672c00a9b356ff7ef3016b826d8d0976c65c5b705288b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_devices/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2fb2fe1de7acac9fe00bbe9918b6ec663623abf8938099a8f7b41505d703db55",
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_filesystem",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_filesystem/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_filesystem/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a7547d84307d4170efbf1815ffc8bf4badd3e70c285bca90255b2aa80c004758",
- "format": 1
- },
- {
- "name": "tests/integration/targets/aix_filesystem/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "854b5b1c0dd3a694bcd987ad52daa4a2a5e87e241145505fa364de304e830277",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/path_is_checked.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "055494791cbce8c13c3229b97afc4d57de0d7abf31cee3684b6cab1f41265699",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/remove_links.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c76d52c3ceac3df1588b8ad3933e040ac9296bff57bf8ac32ae533eedf36453b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b882d2293d0e0475c3f0a1d940a9d96fea2a5c377e3b8579f634fad461f6909f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/setup_test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a7a37539eeb0d2752094ffac9773b106db1f090125ed4ec38213915f7b623e7c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3724e47d997d1bd51e38961daa366646ec705cef09cd62a2948136e8dd2cf84d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d53e7fb88aca511c4dec5f6993622b07eb75310dd302203bc9d128d2d5eb9a7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/tasks/tests_set_priority.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6a461fd274973c212b83e56e5d7a197e316a582e9ae6e85547476e302494505",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/templates/dummy_alternative",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa719c49691aabd3b22160f0b5c64afcb002e65dc718e33989523be08faf2971",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/templates/dummy_command",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8af22a87ded6536dace0aa9e546372b01191d6ea52e9011cc42503d4f8216e0d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d955d8a80b9d85aab9779d3598143d9a97f02d3987637307bfa69cdb599f844",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/vars/Suse-42.3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2b52f4afd41f1c28b2c48fff66b165191525fb9ebaa825b3e104c98457d540a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "061cd989ba91f9bcaeb0c970b22d2aa9e2cf13a07d1e03f9074ddbe9a874e0db",
- "format": 1
- },
- {
- "name": "tests/integration/targets/alternatives/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "680dff9a6433cbeb4ff95620592e73d53b323a4205d09e030ba29a479c347587",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/files/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eaee3ad37e63b6f3e5453cae5e6f3c20ffb8cab3973992d47ebc0a5e187577fc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2a2bdbf4bb8031c938a4df332d7e01fcb66976aadd532c31e876fe1d09ab411c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ansible_galaxy_install/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3bd265e7abdf487e7c1979bbd71847984a4d82c60f714b417b5fae98c007967",
- "format": 1
- },
- {
- "name": "tests/integration/targets/apache2_module",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/apache2_module/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/apache2_module/tasks/actualtest.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30258755b2ed6e44a697865a85bed3e8dcee2b51dd0b3ac6cce0da6b0b668074",
- "format": 1
- },
- {
- "name": "tests/integration/targets/apache2_module/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bac2332e517b1b916557f9943bf76a536856a58afc6213c77edde4682c07c8df",
- "format": 1
- },
- {
- "name": "tests/integration/targets/apache2_module/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files/sub",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files/sub/subfile.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files/bar.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "08bd2d247cc7aa38b8c4b7fd20ee7edad0b593c3debce92f595c9d016da40bae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files/empty.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/files/foo.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6a5ff9795209b3d64cb5c04d574515413f9fec7abde49d66b44de90d1e0db14",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb260b9fc93fc4d46a44bd82dd8cd91dece50419b0333ae0720eb7a794c02d8a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests/broken-link.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "21d26e66620ce3569910265c7a27bfa52768988267f6bd979ff5bd4cd6f87396",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests/core.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3987b41809217f072800b851ba8f19a00ffdc1b7e4508c9b497eca4a4e9e1438",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests/exclusions.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e9d1430f00d9b6cdfd6a57f35df47ba60968cded33565c842f3dcc517a2bbb5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests/idempotency.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3ca1627a609eb62411e3c3e6936b0511e17d1ab66bb6fac4afb3b48d0928f3a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/tests/remove.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20c87205bce112a015d7a53bc819492892425783446f8ffc7c599fa43fc8cac3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/archive/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06d75688d37669db031d0d5d90607d660a646b524ff85ccefd059bd69b9fb352",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "122d3dc3384d2031f9179746389f7641fd35f9bdb31a062613670f8586f2a5bd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback/inventory.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74bdaf35b547d38d9a2d81fb57baf2ff9fe88525b0de1cac491ce9fadcdec6c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_diy",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_diy/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_diy/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ade8d31897eb82d321318493957dffb3422b03c3ef58e953bd8ae877ccce3e23",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_diy/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2aa69e858ce545ae65624bea04459e7be706c4c3f1014e0a5408501b064663fa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_log_plays",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_log_plays/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_log_plays/ping_log.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc5ca975d2e3a0c4289c613b8a1187f1bac1274cf1a96b13d0bf47bf2fb7443b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_log_plays/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad703f7bee42e32b4bfdc9f79d91bb3e604d12319eed16bbe8c575c691c46290",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_yaml",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_yaml/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_yaml/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d4e464262798abd7e2d15aa116626e70ef0e9a77bb7eb2b1d9334f2ee4e14e0d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/callback_yaml/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d128604afccb61be3dd2a49ccb142879c91ab4786e985688cf23aca7c019b16",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "349f906e66c0d40d5cb80ffb896f006fc4bfd3ccd6883962bc0119138ae58026",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a412dc48a16f026307565acc48f4d2f764e7b3ac7d9d7f6d4b46279bc412979a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/tasks/test_general.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47442c80aefa08b4a44ec27d445c0ff81c1634760c8b12ec22dcab80f3233262",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/tasks/test_version.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37c908993016702c0a0b5e770b82af6aa00a676624d37db6c720c1b9d9951d3e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cargo/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e8d3dcfac4ab53e9f978dec321326b28e47cd068b23d8d6fb33beda52f87e791",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3633cb76ff657891fec60cf58e6e07a9e08abf697fc54d705ecdd10820f42ec4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cloud_init_data_facts/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd70b4562a8f5f0b0b7166b6815afa23318c040d3ede1055aafe92b302a09169",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e084a3683ef795d1cdbf5e9b253f2ca1f783ae0d0d6e47e419acbbc4fc80bbfa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection/test.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f32dbff55de60ace66e2555586b94abd0f74f6bbcc008eb8d1c25dbfcc464a3e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection/test_connection.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3297fe2040e5b0c523fd6f14bc0c56a886980c2a1b241b93bcce847958528861",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_chroot",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_chroot/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_chroot/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d4961f0b8634e5a5e3f194d624d8ff66252900892b32fc46c12db712aa1eb43",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_chroot/test_connection.inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5eb84ac30158da1476995439d5c07afbaa95553857727aaf5d68f734f706607b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_jail",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_jail/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_jail/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_jail/test_connection.inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e68e68eef9de19ce29e372127ec2ff42dfddee8af5934e1c6785b5896d540681",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxc/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxc/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxc/test_connection.inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "840bf39cd3675cc46dd72ede6b17ecb9383f97e085b0c194dc33b841213aa886",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxd",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxd/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxd/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ec1a8d284cdd3ebfbde0cfecc54f6852263cd47f652c6b1a7bfc1d874fdb6c18",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_lxd/test_connection.inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44f89257fbaf385b7b113c10a6e47387221ff1a6a851bcf322dfeb55563a3be6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_posix",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_posix/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba6bc4b7b7f06e33b61092629dbd2f094b2d814d5cb051650b7494031fba6bea",
- "format": 1
- },
- {
- "name": "tests/integration/targets/connection_posix/test.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccdab97fd6569dbe495460cfec0b901006efa79ec4c6d066a98250901b0c9d0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d7dd9e426571c0334ab74bf3c78984772b5478d423fd107c01c504bda6ddb22",
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/tasks/consul_session.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "db366a4d270e5c06f6202377a2f560685268af6413ae761ea431135b9bdd595f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "33d18fb680c081d59db5d298a72375481db915692463cffd414b51139b0b9f9f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/templates/consul_config.hcl.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c7fa41289f466e39fa1fbb01f05ca4bc39b073d6c96bf35c225c9b1ccc7d61a6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/consul/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a40969a414a8a84d59746ad2ec3a1b2b697443e715086c01925cc8a163b7aa1a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/copr",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/copr/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/copr/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9a9f0d9e896761785cfcf1ea2c50cbe7d5e2961ade059cecd549358c050c8faa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/copr/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac6f5b8ebe7c5304eaab8f102721e9b25fcb18ff1e35d57e205f2aaa957c4536",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1b8163e6b73737e36aa69350e111ee82450f019cee73ec5bd3e6d38393e39363",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cpanm/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e11285d3c24d91036c84ac9d685ac14affc9303e91d564aa4c61df012b718d88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0c6d87783ce94ef757b0c55298b080c8f50f00fe51344baa8d122bbcdbbe5cd1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee06f343a7d6949952623866c8845381ed7fb0231f32e94aa8e780c244e38d8a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ce10d7cd586ffbc352d18965bbc84ea8fce37fcfc20e049ee4ce2864841eb75f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/cronvar/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89574b4b847b9f3856aa58220ab1df26bf11517abe25b683576fd1308102b3ac",
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46ba8553a45e25a89033ed888edf302e8009bf1b3d577f62a37efdf2e33836b6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/deploy_helper/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7461ca3b1cc9a25a907ca8c9c21ddd3340bf5848de26df4a98c8eac2ec45572",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e988f7c59ffd15b88919bd673beff2599b04f1501dd7e93252143e7f7898ddb7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "85e294fc5db7d14dd9083417878601839d56bb166fbcc75d2a294c30e631b493",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "535fbfbdfd57ab9ddb8f78425432836748d1a5551c8301c56b25fae52da190ed",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "16f347ddca8f37252548a0c35294400ae4592e77fe03367d2da266c4d10be3a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/startproj",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/files/base_test/startproj/.keep",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25d9fc3b48e329a06519472febf874f365edb4dc19d9747deec63d874ef8274d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/django_manage/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2688bf770b1ab976b4c5ecca934a17ff8137e9f2e8e96798f9bb5f2ceb3ec99d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/tasks/install.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "480c3223f2fa65f6e3a2d7b99a6a8382d240d8a39a1748e9b6f2e264a70b856c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "166c2465c0ddb12d8e6fef61a0719a5e011701bece7d59c897d68da2e101cfe9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93f4f61d394703660e7b8efb7fdc4185ab7f579cee353e1be2928b6467efd0c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a046156eb25b05dfca2a5f810ab354067a4cc34f003deb0a5e383eae1e110423",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dnf_versionlock/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "995e695b188db7961d7f3de6fe4270fcacedeadbd3665a90361a984d818188a4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f287a9a4d91cb6207959a2d7c577633a894efc6c74b65972e2eb0d3012792b7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "942f62d745fd5f95f3b5ba8c7b2c7eec4b6bbfbead87f342f75c5bff11680fc3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/tasks/prepare.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a492d008b23484bbe7c6cf2e4af199e619cee6e3f1682e150593d8dc9b8c8f2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/dpkg_divert/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aabd5fd2d095f61a8447e26e58c1b1ff4f83adc171edb393a8ebcec7222ca934",
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b3065c9d434c22b1009b808c53c5221d9e2f8c201f58d4a71fff2db06cf72a27",
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "595c566e59d35e9f088f6df364a86f831b79150e7d7dff03d947a0fc61d1e773",
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/tasks/run_tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efae7ddb7f13fdd93ed0502750a0efce2e042137676e429f22097d8ffbe6aeb4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/etcd3/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c410d3272c7c8f07c20809aba1af5eacad70c842e5d15c825ca385ac455fd3a9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e6209c72ec965475382d3b00ac2a4a421ed6a47d70bcd5ed140aca2d199f7e12",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/basics.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5ce84cf330b54b496a7767995179c97490b4fe1a7172ce98c118da2d30a830ff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/errors.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57228fe0f877d6dc8a7ff5f4041bffc02bb90edb8393587ed0d3ef371c2c21ca",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/floats.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b9570f154c40f1775c1318cd68627d57aa0a8fdb4656d2eb420e4b5e0039247",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "307c97bf5bb2d67ee752f2061a35d1550bf6c825faba98cb6b998890359b5bf8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/sparse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "01371d3811f5b3bcb688300ea0b4caaa90e03215e1c2843304c5be53c5d050b3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/tasks/symlinks.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a9382c1cdff793fa48185fc9c6c8b05b2eb40b81264e27edd79d00c37ee5ecc1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesize/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c77324654c86a859432f147a34bccb1ae7d77f77d04b8bb07aa7a3fcba7cc51f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55a70f7ff94020af40ac26fb36a89e5669f6ceb19447e7017b271c85c0e7e25f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/create_device.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f592609a39ca48d4a140172efe623a76d27fbdd446fa3fa4b19e70c9eedc5c73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/create_fs.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22b57f1e04f2f4216b640d700c08066dd39d55b0018b988238567232764a3583",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/freebsd_setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4f0c258e9f811c66369538c4b491ab192a65a58bbbccb5f706794cb150cdd33",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9380a232b46cb9b9e6ab637e668f68d77aa2beace334b7b497f5d85669a717b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2e4dbe2f4225d31cbd6d5de99d79eb25f05e39300aaca273f1288d699c032219",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/remove_fs.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d42286f60abec1118afeeab5847e1f680defb7f047b871003eb682d510f310ee",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7badcb1afe1eaa2ada350afa495def37bd69032abd912efc342ec89c2171cc95",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d6920cd18ed5a801036ffb67965b492631a80e36d9b800a3bc3ebe8712880c55",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efead6e1ff36b22afa53723f82f74b980f6ec2fcb899cb29be017b9caaaaaf79",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filesystem/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_counter",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_counter/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_counter/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1780a85020ca1006bd94e0b0145f6d5b09568b384e2890e09b1f36b5e53e27e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_counter/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e61381cffc90f48823126efea33cb42daa5ec933b1df8fe1fe6b321d47db9b49",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "28d887b76b2449e9952763697c6f74d113f0b9ab49a31fc19581be0be8b31e6a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict_kv",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict_kv/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict_kv/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9aba3847ffd31a14704ebeccf574c11695676707f74bbab7e2aaa44698bd6483",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_dict_kv/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a584dba9798fb151554dae8caaf4d1c03ed82c01d8df7564fa82dbe1fd03bd7f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1277f87c8d45fc388589abc625eac2f462940208027ceea96cbe84a26958d215",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_from_csv/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "891371439e27b1e84e453be7590313a2cfd91626c2cd44ec1c3e5c030a7c8ea3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52327d9500a32db44b09e9a9303e595e83887287ee3d8ecdda41ff9f58eef349",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_groupby/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "28d887b76b2449e9952763697c6f74d113f0b9ab49a31fc19581be0be8b31e6a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "61fbcd116a06bc7d0dc800980347959f668c5b4c90c9a3af553d11473abd54d9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae63247cc858c2f35d9acb7fefccfc73791d0af43fbe28b33556f596c8c2f4a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a25a3a5cc05695c1300356e103adcf661d6d755279717a439c46343245aecca2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_hashids/runme.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "baec4a8a3c194b2779731d613c67960123861a400a80958cd575145e22f09dc9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "852b469909b319e64fc4f186cbfa812a7f72d389edf3a2b1deaa45ec1b626fe8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1f6109e8ddbb7c2c85cf0d51119d787bafc9d9695dd74bc96d3385fb0a1454d5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1f42825061f9f041ecbd2e1b6c702a9329d8403692df09d23e4540e36391420",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_jc/runme.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c028125bc1be296044e51b7bdcb04231cb566386c2f935871f0e40b4947eafcc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60214d35e8f2e5c325430cd80c635e9d690698824f6f957644bc8e237e1f9638",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00cfbed7e5d4044baaba437bc0d2ba800f28300042c01c53047d8a4894078eef",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a955c14afce0e328ea62f806a2d8a9ffdb7032fdba8e7bbcca41616aa3cdff19",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_json_query/runme.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "040ec78afb260ec34ebf5cb4b62cd2203fe9e579f33029d450a92c2f004d3545",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/tasks/lists_mergeby_2-10.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c901abb1d2647356f2ac233b9f6cf71de132368dc7af7f4e05bdf28fc2cfc6f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/tasks/lists_mergeby_default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc55b21ce77cedb27b721178bf96c11afaada2438be2fb0d32e33eea993a79b3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "176fc3bd46d769a0e2f8a7dc2bb60aa48e9043c22139261dce82b1a25cbb8dc0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34dff693363be5fea786490fc443495aa7f392dbf14181b7ff3b1dc6ceaf0421",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_list/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_path_join_shim",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_path_join_shim/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_path_join_shim/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ea941e7f4e3e218323cd9ee178197e6cccc0cffb6e62f873e0415e9e498392c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_path_join_shim/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd53f6f278376c3ddfc1f7e8f51c5d569be05463717d2f5aa6ab60ee03ab7513",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d9493241922175b56e14798caf608b1567ec0f1d5f521e375bced0e97259a10",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_random_mac/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fae4c68ed62f686121e77073f3d43160d927a876417feadfa3be71b063ce9fda",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_time",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_time/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_time/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ca76bc55b0ed8540c4f3d578884ef636c7b66c1a1cc089e9be629216e45df66",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_time/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c70a784f3b5c42df4e73ca0a58d524441364b5f2343e5a58a39895a7b302e2d3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02faaf48fbb2fa8d25c1eec1df2ab5c75e67646131c5738a66568c2083b1031a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_unicode_normalize/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_version_sort",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_version_sort/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_version_sort/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e92321cf5603bc9b08bc384259a7235d2329dcb42029600b75aa0076ce8c98d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/filter_version_sort/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f754208d937e93c9c873d3b8d73be37d9da93b1136989db5236e75f12854608",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/files/serve.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "635842d29f27e7e27f969969bcabbb76807a7a1e8f092b834f10209acfa253f6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1f270e41b71c8ccec217bd5e5c397cf527c856657a81a463eaae0b06130f9c7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/tasks/check_mode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d01f61ca0d7b950f29ff070e7f169561e718a19dbb68c9deb8ac0af480a005fd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ff8e182f408bb9b8f0785a037ebb41e314bf7083acc2fd7c579cf8dd643b049",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fd7f915a42855db32f162d62d04d7583aa298d2d494388888cf341393043e374",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "835f7bf8121c4b787856213e3f218dabfbe703aae6cc87bc4b4aa9dec6f53df1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4e8c4d4bf2055db6c7160889d246b648084fa9990fe4b6ff4197e30ebfc62b42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1f270e41b71c8ccec217bd5e5c397cf527c856657a81a463eaae0b06130f9c7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/tasks/check_mode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "467e3aec9b6edf845929a1690aba9762b6b7add57207cb503169aa9874705540",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d33778878cb7d90135519335485e691d5f31240507136a7bd6322a218eff7b51",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "173148e596fb6f57b0b03279f0523b75b1ca7d079c9705c022100afd5fd58d75",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c7ab224966913a081cb0cb96619472eb6d5caa5afdcb44c219872027da9451c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/flatpak_remote/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4e8c4d4bf2055db6c7160889d246b648084fa9990fe4b6ff4197e30ebfc62b42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2541cfa23d346daeb3fa38ca215ddab183bedb0e62d95f8e71e6af9c98973bb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks/create_record.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8474f529fce8a5c495b4779c788326a97c91ac317789dabb68fa009c67d97445",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57b334769ad3dca857cb9dbef6ceea4269fb571fa4c578930e83fefa5159ed19",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks/record.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1fc4e933f7c9cab8cb023644431912b892d65ca290fad2a3201f100748609a59",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks/remove_record.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb5a0a923ae8f004baad260ce26fe58901c536b0b6352c6e9420bdba6e474bae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/tasks/update_record.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48ed2f4937679170c6eb87f79e40a0749a61bb738718cb8d2d9f10fd4e4a0970",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gandi_livedns/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4cc754ef899091a5d995b8f57081212b5c66a93c14811173ddff5877c14bc544",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aec0ba5bc5fd1823b16807705eaf33555a705fda2e116d21846eaa79b0ce1448",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/vars/FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1bfdff73d0b467666b98caf7ca46c8ae4d1f4d5a21c08d83484718ff3a768a9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e539831c03045b807895fbf0f8da5f13e1d3a6a9aed78d1f3946a0fdf3678359",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37a9c91c779fc3095ab2e480b350e360bb26988d8e0bd3b8f546ce8b538b0f8e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gem/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/files/gitconfig",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d709a94ee4ce2ab24b5344060dd6553680071da85b7f216511712d365086c68f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19413ce550186861d20f0ac1ffd3b060b447f928a7f07d993abde073258d6b57",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/get_set_no_state.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8bd84dbf2ffdedcfe984e192ab036e4486033b782311293bbd9e0ad6fa349b7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/get_set_state_present.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43513de76f25e00c1b85cbe49f237c6f93e00d4d8f4524ad7eaae8be5823b687",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/get_set_state_present_file.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f92ed3092d13cb1bdb07ef0c98e7eb284ab9cd94d44c5f360ab896d8d0367e9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e51f44e6962edc1796f3c57cf9036edf0a3b5d5b7da8e6d252f107268c61816f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6ef0240d167a91fa0cfe6c5e508d29cd22fe026743b34183851f9ce0f0b8193",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/set_value_with_tilde.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9b96f0949d8aaf9116677176161c6b10432979a01b844b15c3dc520d4a1185f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d202854e0c11a186e10df5e9d10fd9c4f2b6496b99a96c54bbac94a6e5a99e1f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/setup_no_value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42a2ba7577089c90c9ab133845c765bc9fc7bc419396a85d6a6d13be068b0b20",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/setup_value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5d3c152303bdc4d895e5cad7b21ac2fbf74e0511def7383a6baf449618404532",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/unset_check_mode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70e35d7994207a6ef6575db2a3bfb2ffc3fbc9abbe4e037ea2718da576a11468",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/unset_no_value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1cfd693a2fe6a99cc06e5fb9eaf6cf41dc5fdcb78f9a1d775584e1868b979dc2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/tasks/unset_value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efd37305a713b60bb4da0dd8b714d26c061fb3a23a01b6fa188a73fd59e1425b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4fa6677756ae0c25fc29eb8ee5b7e8cd9cb31b303b6ce8bb053a96f57c1bd874",
- "format": 1
- },
- {
- "name": "tests/integration/targets/git_config/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2ba48cd370e3f4846785bd8b11b3de27d24f9d47e94b17659aba32ed054bd38",
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc974ee4cab09ff69efa27672a00535d8dfb457584857c7ab320610d8d072bd9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c3e159cbba9dc6d749a2b27f310a79e73dd79f8be491192c16864e709c12814",
- "format": 1
- },
- {
- "name": "tests/integration/targets/github_issue/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c3727573ab082cf9663d6af87da5aba1a250004f96e94811cfe3e435de28f6f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c343d7bbf23d85e04b47eb5a166ae794148dd7a91ea21e9f4da540c0f4fd0cf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fcf47339825c9d5962b27534871fbfedb2b363fa2590d562a4d90c655d698db0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_branch/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77f2ba9334ca23502cad6112f276bf4b73433d1d77f816af6860f85055374aba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c1e7460fc001a7794a63a5ff0df53ebf4bfbc01c07aa8dc5f4668a0981bcba64",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0542fd9f80a7fc8960372206b7c61b632f4592b2f7672d852d9aa8a88b9aa168",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_deploy_key/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b4c1cfacea788e6b9ce76d8b4d1d3c0bacef30ba57e5af64624c72163c12509",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "644365c3bb994e4e94f0b184639d90e1d61e201f44f873aa69677cb0d81b2407",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c1cf2551973b392d2e4561a3863809c6e905e898e594486edfbde0ddb0ef2ad9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c3a1b5d2012b8506e5943d3ffc37dcce4396e8a3d605c9a2c3817ec6b18fc61",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_members/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60945d49535300be8e42108658dba31fcd5d665fc40d6f186798e7e0682320ae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_variable",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_variable/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_variable/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa1ab4cf78f7a90c216925686d753f7eaffb53e3882357404f80eec20d5e6d5b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_group_variable/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ed33d526fae3cdff057b6022f77a7c4d32b7498112efaa0cb7641e69bec96e0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0c4931f013923286bfbebcf960b7e40393eebe4f174bf06dcac98844b38e69f6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_hook/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdca29ef497b21e9bfbb51f911df8c1cc13a3010965f5349d4cc358b1688aff1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1f903f10210e52835f15d7e619c0bf59f5ab95ad2b220e11838c2c7b50db00d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2bb1c92d8298e5cdeb1a98cec0fbff5b7da4d8897d34516e4b72e8f759f108e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "420519b3c8476fc0a98089d738434f8f1909ae1e5ed2602b9bde86470455a12a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_members/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60945d49535300be8e42108658dba31fcd5d665fc40d6f186798e7e0682320ae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_variable",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_variable/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_variable/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92e7e6fa801a0ab0a48db25e740d0a227211ac0b982c68d68e9ba83fac6b307f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_project_variable/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c7e14b41d8d4ada634180cd2ce6e5ac801c719b1742762fa79cc9287215e020",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4b0f7fb12d1c5a1dd43495ae9bb60911ef077c599d667af2406b4bfe305e4cc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_runner/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a67c504937b390cfd08e7428deeacf53b2fbcfbada9cc952723bc36ab4451a27",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f2ca94e7f32306a0640dc76882d5ea6cb6517fb28fa582ab5160591ef8150dd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/tasks/sshkey.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2569ea4605ff02865a17ea59758a3f1326b4675915fccaedc504535cef986965",
- "format": 1
- },
- {
- "name": "tests/integration/targets/gitlab_user/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8d60d4380e3eb4babd9d5866b3c3c7cc11cc441302eef5398edf8b12d2cf16a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/tasks/install.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "084fe24287e02f0091df7cb1e00a4ab15a8ef021fc39e3e52dada5a990815991",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9bcb9f54c987dff4eb2cf6334ab977cc8c6e0d93083ed06d1e9e684375a95ee5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/tasks/run-tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c1e0433e03ca5fcd553ec8009f5df158e147853ca962f83428a0c1000e4d087",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/tasks/uninstall.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b94a8d10092fc2781587cbd2ae5bffa1909832af5780852b03be4124fd393baf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hg/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7a0d5b9fbb9f7be73ffd1f589ddf8b702d470b7539576afc31855cad91860a08",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4fa733d9f1ccf34b15ecf01294b5b3195d76b8b78b5f0414580160e2109b02c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9aa01df43b8368ff8ff2f27edfd594993bdcdacf76eb59889807346b97422b0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ab7a687bd7881466d582bfb1345cef7b657ca8fc5972c048211de3bcfb4880f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29afac27fd3f252b502b4b9ce4aba286fd2c516e87a9c18ea8ba9d5995d4349f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homebrew_cask/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9aa01df43b8368ff8ff2f27edfd594993bdcdacf76eb59889807346b97422b0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homectl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homectl/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/homectl/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23ad96b79be8312061f1981311a122382fe4703760d3e27c26bef941fa3b64d9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/homectl/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ebd3c7b8ac2c2ea2e54b29a558aaa75fee29d69bdc957e1c8da6982527017cfc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_ecs_instance",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_ecs_instance/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_ecs_instance/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de5cf8bd1d385eefa5fe8257ffdb286d58332a7f1a2a1604536c8721d5536a4e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_ecs_instance/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_evs_disk",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_evs_disk/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_evs_disk/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a09fbc9b56804ffc0541a1b7fe76b68968335bfd7abc8225881d3bbd726b291d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_evs_disk/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_network_vpc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_network_vpc/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_network_vpc/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1509b38a85cee8bdfe771c6d76fe47f4f137a6c9222a19eeeacf1371a5e26467",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_network_vpc/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_smn_topic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_smn_topic/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_smn_topic/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54bf4ac5512282d30aaa5850e3fcb74659f5a692a71c0172969d45451bb10177",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_smn_topic/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_eip",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_eip/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_eip/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5699c9dd705dad64044145400ad39335bd81b444095dd999fe7f58c6339e7ec",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_eip/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_peering_connect",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e261ffb4bbc2eadceb1ea8da7920b6f76a7961aec22eae08c139eb165cce1e96",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_peering_connect/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_port",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_port/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_port/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75820ad044e6554a2cc7f5fee0acf721cd4f9d9134741ed228512190ad27b760",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_port/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_private_ip",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_private_ip/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92f41d7c43c5ca3572571fe48a3863d87cfcd3b816a698628ebcec600449b773",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_private_ip/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_route",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_route/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_route/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c9bd96aa2b256b53990d2dc16cdc80e1ce4481c2ca5f47937877c716b514497",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_route/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3262d580c2976d2486690d580e450acdbfbaa461c31ca8910e8e236de3c41db3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group_rule",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cbf8477e586f8bb57278d7d7e64f132d814916a864f50b3e8dd229125ddc8ede",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_security_group_rule/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_subnet",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_subnet/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_subnet/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd1ea2b20a87c7d012cd2c523c12c3cbbc59e85ff6a8cad5b17ad1f0069e28a5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/hwc_vpc_subnet/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_config",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_config/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_config/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "537fd2fdc275af6ba5b5aa64dafa7adf8679efbcf8f1d6c4563d8e5b4b8c8c0c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_config/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a851c988a2b856c560749ea717a9f38fb2501392ca984038054ee4114a7ae437",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ilo_redfish_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "158ad00ea1445a97e1c5b6b0e12365134587a090b899a5b01bd76beec2dd8e22",
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9e631aa1aca12eb260bbb34bd7f771b07a965d504095be4569a148f400ec2328",
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a185251f0cf4666d4fe3e2ae59c3da7366fb995b786274d4408f65f149c85272",
- "format": 1
- },
- {
- "name": "tests/integration/targets/influxdb_user/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5dc95ef850dbe4e8cdbdcf90c74b390fcf1ad12cf3b15de1b037c5a06194fb28",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/tests/00-basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d02f0c61deaa8879f18cb4b7b077be3ad6979356969d073b368e77a51022b5d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/tests/01-value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "976c5d3b0a55c5801f8bf52065dcbfb09e2a64cae0e5bde38bee69056aaccdf2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/tests/02-values.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "acb18d72ee309d2fa719977a7b1580c7a6e2095dbf666af5db99d8a6dadeb425",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/tests/03-encoding.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1da025b5496ab1abf9bdc41b92f6ee43aff0700ab392733daa4bade024c9a668",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "faf98414a89b98ff97eaea421694f7fe9e936d33dcc72812b7b8ea36a1c04472",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ini_file/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/files/interfaces_ff",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1e8604733a58feb25d85d2693040ddfce90486d641ab06fa289d45a27ce090d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/files/interfaces_ff_3841",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "12b375a22f718f2244edbc059c736d141f7944d989fb709159578cc65891506e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f57f67f3df8bafd2118721ad3a5ecc85c28ba9c69abe39a955b7de8f6525b20",
- "format": 1
- },
- {
- "name": "tests/integration/targets/interfaces_file/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "222d6440e89b804afdb94b10bf7407c0aaddcb63d68658efdffbf48e868723ad",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ebe010ed6441ca00f76d559f7645469f089a40f79cdc3fb0af3511dd0e03222c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipify_facts/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks/tests/00-basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2cfe87661c422f8bec04b806d6e906353dc259d55e6a2b6a18c278a0d3e0d90d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks/tests/01-tables.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cedfd25751938db5690276e1832ab4f0f4eb88d92a7454fb9e0d9d2dda11b3d8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f5f17a3117bb19cf75aa1969b637271124d44e50a776c737e5b718fea131738",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3688529100e39015e96632ce94deca3f35fa32c6dc96e4cecee76484b0e7ea2a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iptables_state/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "519a3d522001e03d5b1a6676ae34217f74d21b0c07c64ed8d5d0a7ef5226d8ca",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipwcli_dns",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipwcli_dns/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipwcli_dns/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc9f592affb4dffd770f50930869e51394e7907cf5352f17a48491153cedbbf0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ipwcli_dns/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "536a08c5b55f0ea5d5b58de3d90806e1341df3c8c1c568dc2494be42afb1e73f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/files/test_dir",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/files/test_dir/test2.cfg",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/files/test1.cfg",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "52e199689c2481bec1de30c69fe948823b576f222a75360cc7ef7da65578262a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa6468f53938b73f79347dc9b29706a1321ac1247e813925e0c95a05d9ae1c5b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "df4f88a131096ff24fdf30a18c7d6f11d5cd020d2064e94fe8da9b1e6b1c9068",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_create/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4de83e34024a07b9684aab1f4a6217aa205a7737493fafe42f6ff40141eec04",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/files/test.iso",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c5a1719603516790e3a007e17f28e28fca7eb5ec8d6205692e906d4239fe068",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb510400875f8efd1df6eb1c488c7f645f120f0e618e2169920c33492102b429",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/tasks/7zip.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "975b4509c955dc0f879211315f91331e092fa2a713dcfdcf2256454d1dfbbaac",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "61349cf9d51fd68d556001198120758755862b0758e57fc4201594731c7711ec",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/tasks/prepare.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e886917ac7434f5740c2cc2b61107b7c40b32934959ba594ef47549629550034",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3bce6efd1142cf1138d0a8c7ab3e1ef038e15471dfa9be2738451c5a0e50e4cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "26272cf027dc30fcd95df70e0b3aa3656228b0a3285e48aae289bb649dc4dc23",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a7cb2dcfc68f900b0a830283aa6117f5390a63f7361f0de0aac5d2bb5b5e96b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b294d90c2772adc4f7a238290888129b12021d8b6d23b97589f81e72befac2a1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff54674472190ab490cccbe6e71e3226bc5640b41776bc8453592e587da5cd13",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/Ubuntu.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "26272cf027dc30fcd95df70e0b3aa3656228b0a3285e48aae289bb649dc4dc23",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/iso_extract/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8e9c8f4f328cd81650a403cdfbe34be2f0460da90e460a962943ed635633bc05",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8fef72781f995752ac2345d27dfebb882a0f4bfa096d9a3308c8b0f7e7381ec",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/files/setupSSLServer.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e073d13bc2dbee83b1b6265d4ac781fce844c668b8249948924481629f9853d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/files/testpkcs.p12",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "194ae4f77eeaf175ebefa471eced93551d2b9f0a0018e9bfd0a24cb0acc380da",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efaf82279408067868c1523dc741ac54e4e178cdea5c72e568323b9fef478a6c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef674f384bef0618944aaac1aadd8c8974a87970b14dcfa0c4cd61188eb60cd7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/tasks/state_change.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "662609b6a22f94aa6aebe1673f99b3ec0183bbed70cd5173ab711d0d866d040f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_cert/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8d889797b46d3474b92f0f707eb37ded692bb9a10c5ae96d0f6eafc999917a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efaf82279408067868c1523dc741ac54e4e178cdea5c72e568323b9fef478a6c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a20ff593bac026b2e925c07b9b9bcfb7e3da90a575b1940fcf185789d3683d3c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/tasks/prepare.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d39bc52885014bc3f14a04fd39fc15feaa1950cc0d9b1d5bbd3982b65a05ea03",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f8e5cb58596bb5a36ca750744922904b610a6bf6ce2a9f1508b8022da686a0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/java_keystore/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f15869163e4716570ba32825c2afa82d1a0553a245b6c483d0e852ec8a5ee826",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "653439bf464124f10cbf0e09beba4e37fdbb99fc3ac82ffbdb48d8c5a6f23874",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/tasks/jboss.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8873ae18f2441425cbe73266fcbfe9ff8580a81e5e3c98245f736b92c7e3b79e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f186d67ed2a7b23c533cdae8cea758cb8ffa2dbd5858b64ea4be861d1dfd922a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jboss/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4eb67a6465d730c165121c76bcb6825d72c1894f6ba3a3c797d6a11b8b4e687c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "461146e6c4e20c2980f2a774e26f8d7aaaf29c36f696dec6c4ab9de528465954",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7f9490ec266ebf584e612c75f7ed8ea51ec1393428f1fa8f8bffe9e909e3755",
- "format": 1
- },
- {
- "name": "tests/integration/targets/jira/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44808479bfd6a34a9ae1631f02d8805a5fbdc06a9a1ed80146de1bf1d2607cac",
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/files/blacklist",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee4d3ecffc31f6d6f12f30e3934e36ab7f523d920e428b31c23d7c8509e2f2c7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00ecb0d28591430779770a2736ba6a9f93b636ec7acb606abf15cd8c99ebc690",
- "format": 1
- },
- {
- "name": "tests/integration/targets/kernel_blacklist/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8783e53bdddaad295467048e483edecd1176bb2127b98c09ba8b45a309b13368",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4f761bb393b96d75911e13b573f5cb4a7c131df8f4737c734a14766bd59c631e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "290c1e7d6698f4a13ddb9112a9e01e68458bb06e5ce25e8cb892a9720bf96b64",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_client/docker-compose.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "875f6faa01cccd6194195ad2b11ee47cc83592408856ff402b7e4f84fb661a4b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "479980d4962c6bb999c685961ec9cfb599e11c63bd04d11cb68086e303a9f73e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eccd2eefe0879171c7c4d21953f58c2d42254373390b99fc1ac633e02d56c49",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_identity_provider/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80f441cdc22c348f50e8ffea42b0aafe895de50b767d25a99832fe11d809a0b0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a95051d0d8259f0ae32235ed669d7bb89db37e49a0bd3163baac7ebacaa38f29",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_role/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d48160447515c3c470044be9eb97c2447e5dfe308301065344c17edfcf89eada",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "684833062e98a096c4980af186b03f7ed9991c5e03e3d759f7434575b1d7597a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/keycloak_user_federation/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/files/ansible_test_service.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d10b0fcb1c6ec6b0e5250821e1ee5a2782e34cad5327b37660eb457400b455bb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_reload.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b20f13d5ee7a2aa15015a8155b3529d0b1a2cebb1d49dd5d7465bb37874c4188",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_restart.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0c1d201bbda5261054ea2665e5d2e6f31a61c07529a839a14958ad71d58a59d2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_runatload.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8299cde6cf41dfe095828bf838e72b4f96743f5fd6d4a06690f99b350f29fc61",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_start_stop.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "523ce654be36680e551275021267b27a14956266b06feaad9755f2b0609971c9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_unknown.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd41fbcd89bce7da78fc8f6a1852e489a529c82531d06b6fefcc62ec58b55db7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/tests/test_unload.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "98aff4473c41233d041f404c30be7db975f6b552acb22a9f2cfdee5c139f199e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "785e14037edd887fc59a7cb2bf4d384dc6b67d54eb2a3683a133056b6efa3bd1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "df6d13c8c4fa9d93a8d958e577c8e92b7789ea31806835c15866047bddf1f82b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/teardown.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d982202029c12820d4d29bd3d35d5568f7370d0d8fe292e81e2779da393c8af9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13c2574fda72b4a025e49f7eb748cd9242765bd62b9572d35da396a74705e05e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/templates/launchd.test.service.plist.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b2aa7254c3c493666745f80868f5eed3ea63089ded01c1d45a97a84338c8585",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab2ed9efd43886160eb67c8a7d31b949c203616466c2a6762604ff2074292406",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7568f2a55857df4d6b83e7be5dd251131e4e092f6be4e74e479a85280ff9a1ff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/launchd/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75ad1ca2f920de9b6df6fecc06134026eb2f9e59cd559c3cdb214e88c93f03d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5d4da7d75a6e80bc78241b575d83e252dcbe32f9fd3770e05d808d896dd6f31",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/tasks/tests/basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b3ddab041bd891f70f2d3d8c83c7480aab5fc5654165f2e3cb4a96bb2e1a5e9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa78306d233b0651c0e88c40ebc4974b38f6ff3aec34f344413a4db4ee3e785b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/tasks/run-test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ldap_search/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8f4c57a73a1bb071fa3c931f0cddbb6b8dd7ce48e4e1afcfbab2a6d1e915482",
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8c8b38f02715e310477cd9002218a7e69f2ed4ce3061f6312386d799fa0eb9d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/listen_ports_facts/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "59fc7c3959a7cbc9ff73abdf90f8fa853c52ff56602f548faacdda0b5bc3d485",
- "format": 1
- },
- {
- "name": "tests/integration/targets/locale_gen",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/locale_gen/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/locale_gen/tasks/locale_gen.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ee978f272037425567d6e2051c8ec5a0f263d25a81bfb4ced417aeef52a4147",
- "format": 1
- },
- {
- "name": "tests/integration/targets/locale_gen/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee234de7e912e0b7b6aa49643af769675600152350ae20862abe8d38d62f5976",
- "format": 1
- },
- {
- "name": "tests/integration/targets/locale_gen/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6fe5ba544a3a858f7921ead134971c6b094f597106f6c621ea21ab4608ba5f0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_cartesian",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_cartesian/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_cartesian/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb854c1b495c50987895fd8a267dffe50c4521cf73d6018df77d511c915930a6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_cartesian/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a448d01a35a1c112f7d370e8a29da2e5d960c967c9c80f2322450041aca81da7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69c0cb85c493f4a56758eb814a9c36104cf36c449a0e54d1a6b4b72bbda01ec1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "025818f18fcae5c9f78d778ae6e246ecffed6d56a886ffbc145cb66d54e9951e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8692610cecf8948d2a62fcb3b22f56c0fdee4e2d9d8e69a8cd3571a01cb724dc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c3ae898ca973c0b55357648796561a08a11b914469d9b8bab4488bde9448b6f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/library",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/library/local_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "853a4708a3d35eec2ffe537982f938eb947da2faf1b405a4690b5b6a2ed5dc7c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "737ebeb3884ca0395d497b4d357dfff97dcd1463a494b2fbb2e8dfaaf1b4251a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_collection_version/runme.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4f807dd9c15cbc744a767651439de60fb743e716433196f25db62259bee61f06",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_dependent",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_dependent/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_dependent/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "807e9e08b8a7aa88d7062411b826bc8e6857f4b44a3792336d57cb0c793cb880",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_dependent/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "695e4b8fa8a5286051c058d1844092e58bc1550040a7c82112912d2d2b870ac1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60c4ec43079a8d934d7f8c21cf902cbddf88d6d43432f45acf00a06804209ff5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ecde298d2c5c606796c2d9d6786ac9dc0c81f99f59a501b45fcd037ea36e13d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a7e80348f1c6c2dde46f749d14d6098983cd5586973b52fddb99c4ff0494cc5b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8415b9a44e521d868b65251bb6810a29a0d3cd513751441e932fd84bf3b461b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/dependencies.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f30e183aa0eb77ea5830ce9cc7166cc93b874663a4247caa67bff8144641490c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ef51b03fc4c317f790f7717b8134a950749cef70f871d5efe3ab2762e0ed15f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2871df47541b86d0adb6a4444eb01df5ab1124c1dae75c3ec7d8d0513ea093ac",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_flattened",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_flattened/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_flattened/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1374dbaac0fbbb8c550643883dc61e1346375901fe96cf98b1366d2301261384",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_flattened/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "991cb87e99f0e7f52f7416a83c33bb6eea974c5d884023b0b57c4f99bc70a37b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv/dependencies.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b937926bdd526ef463973b166cde34c884fa779bd898b6f63e901ed7c0f62d5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2a322dfe5df22a6407fcf5b91572ec5b232d97d8b4ba1a6af45bc4fd7baaa60",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_lmdb_kv/test_db.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f340fcdcb004de2205af050b6077e89bb71f1ce08b972c4deafa7e45bc3809b2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b3735d01e792324eb746207de73f90bd1bd83ee3aeda65e6d733d270f86c5ffc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/tasks/package.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4bdb24595075fcad9c9f2340e03bbcb9906cc12ef1429178df485b48623ac4a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/tasks/tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46aa3ae28612cfc2d26e4e562b960ffc37fb7f8ae726aa3d345c93b4d065374e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/templates/input",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b5f9a20d102714792f1cc5d2eb6b87ae0379b2ce632d3ea1bd983b13a2d819f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00b89accfb1e13ae32acd82783723e64793a7d4870461b042fecdf80d9dfab0c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/Fedora.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a867b420471083523fa4867d401e2709b60258caf7ba1a5069049c809158277e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f341735e70e95d1e7b23ea160010b148bef2e38f2e58daf9fbf1131318763fc6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29b468e6a619569af108d581b1fc0099511cea7bfeacd2e771817abfcc17db83",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_passwordstore/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10316af7c1503e4b04d885baab5bc61ecf51c4b073a4c5b4078b8bf9772a1535",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_pet",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_pet/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_pet/dependencies.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e867200e5243183bfc0d00303ed73c82179449721c3b536cc28cdbc451a51b0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_pet/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_pet/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6ebdf42ae20e632330b2159cbaa2b0d6ec2731dd8dd62cf873ee18f83b119d0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_string",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_string/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_string/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de720cc8d59a01261c316a93c01f6a2906ab6d8e042d2487350b7c748aa2ff8a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_string/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54f5c7b625741fe234715e821492938834905f6dde23d86aa922af79c09d966f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_words",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_words/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49ba587b44048f1af400f126684ddaae01111f2d3c8d7a3587b7387daf5b9f92",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_words/dependencies.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2950a0d6312c16a08102c19a68801298007bdc538f84f36ce599b20c76eacb84",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_words/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d63607a90c412835ad16889e23ffb58bf589878980a7ff2ede8fe06dde514d73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lookup_random_words/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32511a0bfba3767e12ad08903c8a90ed51c9af627b3b020fba5e626bfd609f34",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73c27a48b9413acda1ebeb7276920d77ed66258741e3f7c8b95424dda21bb5c7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aa736d1ebed4a320d02af6397a292c44f243acb4738dba4798ff8ea8920b4de7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/teardown.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9cd523d7c8218d7b4f296ec486d2d933d4c3e9051d4b25e6f1251bf70462e71",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/test_grow_reduce.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "564cf97c44900d4e3c5a3148a900b4577927179cdf8732d6958cea9862c88ac6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/test_indempotency.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "acda7d6e98f33b902a2f78d1389d57a6b268b0f1dd04b68e615603c358a6bab6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/tasks/test_pvresize.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10f5e2906836542bd73b121fcf6d93b69395e142f103c1abf754af5d5fcac44f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/lvg/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d74111e18f3d38965d19bda2998ddc9bfe25a34dbf5c26c2700c70c0f645453",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/files/smtpserver.crt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a55d9996272afcf7ba2064e9d7ca4c77617a405966cab5157da80566482431d0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/files/smtpserver.key",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0895aca337049f7faddb275b6e8a81ae0e8285fc9388f2d96e9b970a0c31541e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/files/smtpserver.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07ce913cff3a6a186774bb3ac87b9d195f650141abdbc26e7bcb52777392fc9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27c7a92b984df15bd6b134295d99c0c1615823f91bfa3b3106cf9d5aa9033aa5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mail/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mas",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mas/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mas/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1133fa36703e51b978dee3be6f6e7e291ea0e744208c39ae9088b5ddaac49d6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mas/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "95b0a7b5f17cd3772fc4b68faeb81e6cb6b459013f4b5c91a2812d06afc31e2f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "04d09b3332b47f6b7c88e028e61eb609d9619f2348f72e06822082cd472113df",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_dns_reload/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6cc39e44674cdd97f45755136cc976294e1df35be4128970367159ceaaee3c8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_memstore_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c80257da29e1d730786a3377fd78936022c5e532bca726dc8c4110c0eb935428",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_server_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e6779c3721824e477ee2b1075957ea75de4985e790631967c6e404bf963a7c3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c81bfc3449bf48a59e3ee5bfb16dce121eee7001770bfe843233684a4a09d82",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bf5ffdd7f49c858218d24c2b92b4b0cad7c308e2b91c8aa7f9aa26c42728b525",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2a37de1a0afb6552501549e1f955115730ca1ed0e60cf18f07b89704b749ae96",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_domain/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb0b5904368bb749f72ab0dccb6452fd5b92903763c9c7b02c654ea2a254d9d7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dab77163c3e164596e15c1eb1658f88196dcc1e92f38f067c652d3c27756025",
- "format": 1
- },
- {
- "name": "tests/integration/targets/memset_zone_record/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/library",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/library/mdepfail.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffde27e3ef28041d0cf5826ff7395267a37a1d2bbe648802088839649b2d15b8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/library/msimple.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "db49c81d8bf650fe23c5cece2017d1faf94809ee8a39854f16cc5c0e87b87701",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/library/msimpleda.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f96a70bdcaa67a0aa9c1755ae8f0b28d2fbef24414d265b929d6086c6e4e4a1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/library/mstate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "010b5c1b2c0180c9acfb7779f0fc4abc21128ac5e3598bda62415ae5761cd687",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f192756d5450841cb56dd181ecd4b640ce5835fb4918a4829a94d2f9625e07ff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks/mdepfail.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0aedede136fa52bdd76262e978f30a185223c8291df5b36f301d7008c1fa18fb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks/msimple.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "340b40190549cc359558857ec33b68842c2f00b74598b47e2a800450f08783fe",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks/msimpleda.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "95510ddd0bea47f218c8865c589ea83d02213f79e78f99bfb1d948b580cf7abc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/tasks/mstate.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1bff596c9a017abe3f7ef1279ca5d0d602d797c27230a96272d00ec6400330a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/module_helper/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c965891bd86e77ca17e9eb381554259b5be20540a005a4024bd405a7b398ec1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/files/httpd_echo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c7a5c16f2ae6c611edd538f3d0549f89db33015ee5e7cb9193b60d0b39540c7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/check_state.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e08af6eb7cf52766f08ed7e4cc9ac32c9974eae8e8c47203e9fbf89337826377",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b71412a837cef2ebcc536d77efe1d5146ef33526791fd54c2868eb017549b541",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77856e3d9b710a347000a4df4d7fae6622f56bbcfe71a200b114f643bd2bf594",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/test_errors.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7fbd04ef7bf73505329dd0340d28330b1dd8f80564e649a3d217844359f9d3c4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/test_reload_present.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ce236af6a0d981a66b001afbfcd7d45d7544a4397739ed45b256e9c5fc94da81",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/tasks/test_state.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cf46c8a28d3089d91b851add3f68540830af7281cd848b64fb1e21677cdcb8b3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/templates/monitrc.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9de9c9c884d2080f8ec61a09fee4e1b493cd6f76f669bc866daaa1637c3b16c8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/CentOS-6.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0116067b4db69e9e5ff4178810fb268021a7d89787f3fe692863d78a4977362c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eebd4e36416221a18f3948f6b64cde3c9ecc7de4a3873cc4650232cb4eccf4f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/vars/defaults.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fd17ec0f116ab194e2dbd313fca5de30362d09be9b48ed4d87bdaa7df5ed4f7a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/monit/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f50efd00db8c2d6177833ea2ff8b473fc4656571203581300af933f6f30f9635",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bcefe5429771d37b3d344153395093b33a0ecd10d8fd9968af8ee0d63684899b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1807cb88a11eec71eeb260bc7a744c740f81256fe80f6a37a2c55de020fcb79",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/tasks/ubuntu.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a581edcff230b2c1df3d2fdb921ed8c6a511ec27de41be2b02078e092d314688",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mqtt/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ff7f409a177587a8bb388e3940800f5817072d7bb6ed5288c9c4445536be484",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a69057ec4212ddd879447db0cb98c105b7f138c89a13f40c186dd8eaf87f66a3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c04d6c95c17d90a7ba3c4c161f90be9faa916e8acbb77eeea69f54e14f695da8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/mssql_script/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9e395016992b8dfa6def8cd2338465b68d0e735397c07899ca77bf5c0d493467",
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/files/job.hcl",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37e149184dfb3c56a86af62362de71f887b6e3d75b9bb8ffab07bbc4dd8aa2af",
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0d7dd9e426571c0334ab74bf3c78984772b5478d423fd107c01c504bda6ddb22",
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c12e85ed9418ebece20dbbe3adde959d8e0711da29277df9fb082cd8701f92b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/tasks/nomad_job.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3c067f9a1599dcc8ae567507fd85091739acd42c409712cc7ff80261f8778a5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/nomad/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11923455942cc5d6bf1e89cfec52d38ce22ed832752a317d9906562d6986b98b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8fc45c0fa92f9e855eaef341b5d10d9ee2630f93bc4bd041a943aa8c1169b3d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2f69f96675c5d909074d14b469fd11b5d0799710cc39d78b0700726753b49421",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks/no_bin_links.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c6cc3371774a8379e1e2a2192af509387a4877d18b07a369ebd433fc1044b79",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks/run.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f30f4b64bdb651d01b7cc458d7269c00d968c236ad8d4f084c6b4cfad7ee4913",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "befd3af1f502a374000746f446419951f16814bf9f2ff5081ed4b6e4dfb1c631",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "05eba1a52eb4261b982cbcff041f15300dff20612b7dbf0cfde3d45e8bd5084c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/npm/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6513e6ddce12ed2e95878f953386ea898ad88bec8085158c8a422d78a03a4a5c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9d5d091d0392e99760e265a1af2d157adb0214801f24e96ffc454993d206307",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ebc8c5ac109203353a763337fe5239f104f0a997f4bb80144ce0af08da9fdc6a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/tasks/install_pyodbc.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c35a5a838c05ae876ac633ac75a308b3a94f5d34a5ba7e0bed62edbb547e59a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3eb64aeb340726077141d3cf9f6700ed30c00b4ee0b8a7fb8f2e47e24aca80e5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/tasks/negative_tests.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f655299c1f15bd9f4cfb04cce676fd6aa13f0b0052c18a17c4fe0f2de52b18bf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/tasks/no_pyodbc.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "67e03bf567e68aff49728425969ce1179bf82dc5e4ee75bccb88bcfb03e9de81",
- "format": 1
- },
- {
- "name": "tests/integration/targets/odbc/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d3670fcf44107e0637dd3ba6cacc778244eadadc4cc233aaa6bbd8be133284b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/files/testhost",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/files/testhost/tmp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "18dc59bac231e749997e1a5243db69081f63b757fd43c37de017e20d58d010d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b981db212298d31645895377cac39ff68ed0d739270f19443250438ca66c47a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "59991d28adcde9f920cf1af942fb08bc9204c3ef711016c145d8cd1f35123e65",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_host/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "768ebe421dd904b7142848f4bd6c9defa722632d9d4fdddb4a489c8ed755b825",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/files/testhost",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/files/testhost/tmp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b303d1a0b4deab46c421512a15476493a895061e9d1f71f49ce03d78484e928",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b981db212298d31645895377cac39ff68ed0d739270f19443250438ca66c47a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49b218551e1532d35c5e339addc1d3276bb9fbe167c441e98e1f32d9168afc5e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/one_template/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "768ebe421dd904b7142848f4bd6c9defa722632d9d4fdddb4a489c8ed755b825",
- "format": 1
- },
- {
- "name": "tests/integration/targets/osx_defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/osx_defaults/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/osx_defaults/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5199983dc3924b34cadcc6d04ea5197f9185191c3c4948330008b742cb59e20",
- "format": 1
- },
- {
- "name": "tests/integration/targets/osx_defaults/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "008395b49feeb25967b9261639f4bac0ce08f7e766019bd16bb5566c2f2035f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89260dedfc1ff941c8d1ef25efee5993bb384dc40b5734ff86e87b7241c33309",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks/basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6529f40764a35e02218e52cb762f6c7057e69ecdc960cdb754c8ea7129d89d68",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2d083300d0e31d69b5b02293a78c1d69007b17085ff3ba4552150ac11a01b27",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks/package_urls.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69b99006f78c48c7d26ed22e6bf9bf2e39b5588424a9202f264472f351ea6604",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks/remove_nosave.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fb1dc7897bfc4fc9f4e32ce5d056dedbfde8b0b5f59d3c3c8caa11f9c1794bea",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/tasks/update_cache.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfdfd1b33760a2b4cc87012556b6cfa2bdc90d2ff3e6bdb4d27c2f9a6e087f70",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pacman/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5feee27e45ba9e467becb074e8789133b40238558c0d0cd4dcc85f50e96017ba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69321ab157a3edaa675ba5be65214e59d93f82f7db0cf5027c44e157576b7130",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pagerduty_user/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits/files/test_pam_limits.conf",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5df9a5dd3edde9b71d8b086db25cae0293c425c687da179877ac5bc8b2ffb30",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7191ea76b8b2d3375e64cd449bd3b51b75edad0dd77f227d65b299c1930c6ce0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pam_limits/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pamd",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pamd/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pamd/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5caf759f0397ffe1d1bb62d3f78bce6a86ea757caa6f2ec2197dab1bc3c5a6e8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pamd/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7366ec1af83f8212f093362620022f4a3e9d9de63b38c75466df933839cb1138",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/files/obtainpid.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae4a9adefeed72dabcfc60eef1cfae673b7f0af5a25a4f0df685e431175e0b24",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e78297d4c4f795ed758263e06c79cec7f6957b896e3bab5e20d2766c31f6f03f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pids/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pipx",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pipx/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pipx/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0135fea669795f622d1fbff138755111ffe00c8baa17468ff3b8c7167aa56363",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pipx/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ce7ec9d8af44ff68ad94f4fd44b2962f33fac14fb1e1fb7de9f6cb0ac0fa84f8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e0d942a40893b08957418783c1d522ee8e7c8e8b07b58bb4b90f011257251aa0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks/freebsd.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e46af856915b539ae659377d87226dc48a046a42997b7d64bd0c67a95e40d050",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks/install_single_package.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30f56b50eef27d80a39e074e6f5379a0e4841700964ee95e0e104f941e93dccf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8eb5a3d187f0980a8e04e51d16c53f7bf7b84f139092ac0d5fb7d463000434d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/tasks/setup-testjail.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f11c00003a95031754025dc7b0257c9b03714667d178951287cc3794895e96d0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/templates/MANIFEST.json.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7723b76e9aa5eec55d6911d8c10c742e99e38820ae0f54781373c458ef7379d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "936e3c9e0925512166f7bf2e389848412789b1f2dbd9a6ec719e7b0a75ff11c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgng/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80fcac1bf73988cab479c3db85a45b2bcf71260e19a6e691dca88213b7c83e42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgutil",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgutil/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgutil/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fa1a76125fc665b4623a4d8720fb03e512cf38d631a2d8593641410b8cf22626",
- "format": 1
- },
- {
- "name": "tests/integration/targets/pkgutil/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07e21b4fadf2da7cd49f0116e21341154d4ce15404a5defdf083b921516ee48e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/proxmox",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/proxmox/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/proxmox/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6699c47bd7fd2f9624f0596548c50a2343f7867d49e2f8ebff7e2af12074ad3e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/proxmox/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "083522361472fabfc894312a9185f6119978925f58fdc221fd9a79782f3190c8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/python_requirements_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/python_requirements_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/python_requirements_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "952e2b5d7a62284a42c49502825aab37837fbb7638f4cf56d74cbc0002fa96e8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/python_requirements_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/read_csv",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/read_csv/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/read_csv/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a68f6f094d91bccee8fbf7e4fe4bf3537d3d142a242548f83e563c91279a7606",
- "format": 1
- },
- {
- "name": "tests/integration/targets/read_csv/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27c8ef451c7c17943869974bc71737291faa7323907811694fb623700ceb3b9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7875cb86e0e75d552d4840ecc13a97c949eb6daaea319fd2f1301aa88701f593",
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11efc219e584c5bf8ad5daa37b3fcc3ce2d8c113cfda448dbba21a2da714ddff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/redis_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23e223be5470925ec403c01861adf6a9ce98b6627243d6ad2f58385fb4d694f4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a33d5bfb4706c92f7f057d17e70b799a6464fcceb2c3b304c51b5bf9f6cc8147",
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/files/test_job.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ec1cd70a835eb80f5b14d948010e16855c7a814458c3ed4568acadec80510bf9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "411c22afce0081c5544732f2174fd78c1c1287451d06be67b49d8779ded9acb0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d579efa9f70e445ef493e339e550593a57b1d5e3b9d3dc1afee4ac799141070",
- "format": 1
- },
- {
- "name": "tests/integration/targets/rundeck/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7e7e1c03c776328595ce59e9cd526b5b09e671204d83909410d91f3af01a344",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d3a9ff0da05429f37941231a7266e8a09cf2c716007611457b9a63e47226ccb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks/ip.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3d99eb46240db6fbb210bdf710969729176a4dc0e4185ba51a3a882f054e659",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5129fae409fe4b9f3027ad5a696792c2059215cdce03a473beca9ea7638d5891",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks/pagination.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "46ca5a3f14a6f20849abb0fe518a47b5b6a2a1b188c6bcaabd23f50e3ec4c52e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks/security_group.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "68f1db98323b94ed628040877036b264158152279fe11a7ef659b6ea237980b0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/tasks/state.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50868f360a93cdabe4951870e52df684d59d09ea5556b41e4a37e6db2e7be4ce",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_compute/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "853493acec7a353030441990b6da91b04005237db30a3475a7782e568397ef78",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1c8c7c1f542d472c44c6e7da02d21be33974e90f6dff27f359ce7241b1afb693",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_database_backup/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8e636666bf86c5da11a1a862f00b9f523f3ec9d400239b99a47df4474fec963",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_image_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_image_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_image_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7c3985e1f44c3c44321b6c0a2d578a12d898521f5c81f591b5669b7a022721d3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_image_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f6f0b020f9c3d4b1b4e1a14919644cc6c469401d2e7fe4ff57c75dfc3e366131",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ce9f220650b042c8eb5b61a02904715882335a702d41b7f5c99d1e907c8daff3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7484dbefebee0ff6f0045d863158fac6e302433247d115f0e8144be531428ff1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_ip_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64a63db5e3fcdd9491ae50fb45f1e01dbcbf5e8f5d52a89df9ff132226234f63",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "818c3c4aa67f6b54b1258d8594b60453f3383570316896f970fae6f5aee19222",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_lb/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_organization_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_organization_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_organization_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cf1e66b4ef6c0a2a7d97b312278fad5d954953dbb878a4574b79706fee898aa1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_organization_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a812db5681f6477d26fd7955c0288e14da67cb1f5b151d3658e1b51324b9434d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3bcbd121e3abea9835e7f51e81b2da5a98e346a3d80e1850726ea23910201b2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "98a9538a4d2fc1f89399150252e84060730fb20d3e34d5eca3cc91b8fe4165d3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e2cbb083babb3647ae0eb48848b4f2b27e69f56930046dd5f15ce4c7c99cac0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_security_group_rule/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_server_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_server_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_server_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd1f9b47cdd018934487d38089de553fb3b43ee71400906964004afd774aae2e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_server_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_snapshot_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_snapshot_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_snapshot_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a16f6e0af308cfd4dfc3843e498e5f6729990bef5c5ffc0b682e4e017bab314",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_snapshot_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_sshkey",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_sshkey/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_sshkey/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f6bdfb6b06c04764726c7a1ee3c97ac38476d2fe0d21de694a7f43d92ac48c20",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_sshkey/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f6cc6d53c9dad7749fa6cbac4a8201d0d26355ad194e184c5148a22d048d2e0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c1e1f3fe109406b19b4c82eaec06c7fdeabc3e3e627eff6961859dd8d6f28366",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_user_data/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8253d2c996e5fb60ecf54fcd9c13f8a15df898dd60f95c41aa2638bb34e0dfb4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc48f489deef2cff35e3b1fb618c6350ef36bf4b8f9848ef27787ff2992d7b9d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume_info",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume_info/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume_info/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "292b61a61eb9f906fe8341153f7608522fb698fb0509ecd5b3671e3d53de5789",
- "format": 1
- },
- {
- "name": "tests/integration/targets/scaleway_volume_info/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ba305d97432a37d4912f291b3f4fa4f76ab544dc9f2753910c35935dc3a3617",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e917715fd6de57a163d1c2f41bea7b96e23e2ad34496f175fa069f1264988d7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/tasks/sefcontext.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b153e2c6e76b42e11ce0dc3efc209a845402c6cf3d1b17fd9934e8a1aa2088c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sefcontext/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "467b9bc1e410a98e565e4903966195b8b9a9d8c76e1f88bff6b1724369d244fa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_client",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_client/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_client/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fb7ca5e8206e2e10de97e975dc8f5de3cd43ebe27acb5eea3dba31132db7a10f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_client/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89b86a747d9095c1bceea2748aece92c504e4409ce53236c9898a64a3774a100",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/pipe.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5b067919a3bee60aea22d1fbca8cfb57b99a8862d272c38f976a903ed8316d9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/set.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "84cd589d9a596950c2360a26668980de7174d7bcbff08df6039ec310c578f5ef",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/tcp.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63e32525bd29b6c499bd01a0a3804c267f6c71de066c86a1fe6c796d59ee0c75",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/transport.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64757ff83593f669dfc25bc41c1abb935ecb8587841be41f2dffb01296f76250",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/tasks/udp.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e280697b30f3fbcd859f3f561d34cb51cff5acb3eccbfa8ba9b1598a032e860",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sensu_handler/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c742157c2d638e509d901a06509289c0a19da50f514e2f059abb93d9f492d88f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a7a029ca849c93b2891e0e35afb1ae117b815ff0c6696e4fa7d239b5a37bd47",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e7676624791dec0e0e6c8ffcd677435ae9a1f02c52eaeb7daa7ba03b72d4c52d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9534ceabdafc0d6e7f20f96e64bce87cea6c93ff4fa4d6a9ac078f854631658a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "35491ab41fd690530bc6e3f1c7f17258001acf056c113d9f2e50b3824a204046",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aef7e744c306a83637e9395ff2f7aa375b2337fb8bf8b7656597f585ea469f11",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/fedora.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1f904e1f682ddb4e1dac223c46baaa1b22f6d0b1801aa6a3ff54d283f7b570dd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/freebsd.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6dc1f19ad13663f68075ebfc933bdbef4532c527a440b1a82ecad85442dffb05",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/redhat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c19debd2669548a152b9b2da7e584f86bb5d5e51ffe6612adff775056cbc876e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_cron/vars/suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb743826acea65ecb3cec1af4c16692aaaf6a80bc42407f45a4cb219dd3e21b8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_epel",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_epel/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_epel/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4013a5ad79e7944851ff6a5be0d25dbb2e4354aa6be08e3c435d7707e1d8576c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc96f60f9770d3926de79613a52d26a89400a88bbb2680746b6b8546b20d23c9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fdcd7485be383a77858e967fd1d53038375736553dd7b8d5579c7d6e49d24c3d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/vars/RedHat-7.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2327ed2b66086f12d6644563781754dd9d8131ad9beef32f315e2f9861504deb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/vars/Suse-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/vars/Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cc8858e017dfc003a31b4bd8e20a7d442a996e94bca6882354d9cf9b7a43fabe",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_etcd3/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42fb21e5a17c47ffdc62fa1ef9ae61f066ae3010150ad08f9ed877e440872167",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "673cf7c93e0a5773c1aed10345ddeb3cb2fdaac193b311970f0a9f1929c1ddae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/handlers/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f17b28729bd86bb98423f9bc6e16598b48540cb9cf6ed2d69f597b319aa452b1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/meta/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5b3a4b611769090095ae6d6640f5a7ab139cbd83c9f1a06cef6668bcaab35d2a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "385cae6eaa6fc7c94a7f5399540a4f268748a3ed8df6ee839d49102820ecb37d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ba3b350ef99a406c2000e141c829d66bfe3aa11a572072a00d1f4438886d6d4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_flatpak_remote/create-repo.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab7ca88e86f4a63db15728eb50b27ec6604da8efd3fd89df8626494f3792b5d4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_gnutar",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_gnutar/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_gnutar/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea523c40ea286f1458a7f4135dcc548e50ef105b03aae76c11696661742ec2a7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_gnutar/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_gnutar/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e8fbc4c57e31732b07eecb5c841956fc63abb50a723f77779e510b9f118e8bb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_influxdb",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_influxdb/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_influxdb/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2fda0abb2d8bda4e8ca6ea30d7994b22a90871f7f7a3aeb7fbbc86c1d622fff5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_influxdb/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b0636fb6ff7220ecedf7d3b481e221726663d418caf6fe7e8f3f6b1acd30ce42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "acf40ef4897d85734cdfec310d31a05515b0552f9884e6afcdddfa3c25b57b11",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd76a70eed20070c8ddc348c569f514fa9c52c6dc90f6496b7dc0539bcb5a04e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2910dbc11fb229686a3d1116e62a22a9f7b7f1d3d737c4b6ff16dcf351734fb4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5467bf8784847f9ae6e9da39e4935a32012900c7423e84e43560e588911c2e9c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_java_keytool/vars/Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "502530ab512b2ecf51959f4e81d899537a6da192c7b4e6a88bf860cf950f2aba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/files/mosquitto.conf",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b90092ad37894754edbb4c4ab997e12f96f8a93b26ee58dd547cda7e1ae04a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "018110439f4fa79f060ac894fe54567fd9c3eb410aedbdf0b4aaeee1ad5fd705",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6384c76b03ae445b891167e13407ca4915637387a6a9bc6c23bd1d3d92baffae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "538431cedb49bda0ca4126c7091f3a46cf282e50094e14ebcde08e17aa55236a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/files/initial_config.ldif",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae3cbc203ebfd2d9a3810399be1923b2f8d41162068f92f0cd473033c86bc697",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "35f150a5a1d546167e9bff24e558439396208879876812cd1bc210252a86274c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ef161491eeeca8084a7a9b169bae69bf2edb6a1e84b56e40870e2aa9026533ae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openldap/vars/Ubuntu.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6300a7fa4f660acb989c8a12d2fc55890c00c89e8ce430b82dc8ac9f7e75acd0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "87ce8a986194b78ccb7f6aa2fc8a64d36e6036e7f87f400530420961349b7962",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc13dbd2b6322320932fe91c3127dd7bdf0045fd5922f4b24c4ce2f8fa4f1ba7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_opennebula/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0a4c50dabbd36c9c08ae74da3171fa2953eb30fa8b4ed2eb6ee31eddaf6938ea",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "edd8a1449dca1cf2ff7cade76d428a198a21d20a8b7b989fc4fe8ce3b0a93f3e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f00155101aa0884fac290c2c6cfb78f3dd35bbd66ea323ae8c644fea513df808",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0cbe4792a30e8708b19d3e45506c379f7c7057259abcb1c15ec41568661b9dd1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91888bbcdfcc4f68838c129eb0e8d38c877943c5d091fbfb034bf56c5fc91f73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/CentOS-8.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3cf78020540686456379949d5c5b5aa61fb58d67a0b8a1e78ca167b531639ec4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/Darwin.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "705c4892e0e01103b5a23fdd4105897a36dc9cf4c637c112941fa8335ed2f6cf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "493ddcecafb42d30d524867c87e06b25de9cea7ca3f70344204e553eb3be9e25",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d331cbb706303b121aa40b795b75d3e746d09944e98128fb6e1cfe6630f8991a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30af4802c88ed30b55292ed0be717bf65222adbe96b6139d1292c6b5b7f9064d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_openssl/vars/Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3115f51d4621ae5202defaf4df42219f25c2ac20115968d0b749644c58646e8d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_pkg_mgr",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_pkg_mgr/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c12b1f4826eb17a4e9d6787ca8b340a49704b71b65a023f9a2e2e6ee469af3e5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4bfccf8ff60bf5f1b2b39ebe088eaabb616620191f3a4049b851ab41e0daec1e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0349988df512a65575f9594c62d6d8a0aa0cea38ef60f75236e6c9c1bb075d58",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4e7839720cd873fbfbe855a61c55f4d69bf5154c420a5a776daccba0db0326e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be49da51a69d0f8af9ad8bfd120189b95aa9feb2ea00be9e2f6e06af3a5c754b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a16cb164d32705033b9e7a7c4e9b8050de79c561deddbcc8603e8d0d59cb563e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e8000b3819e84f8f1af575e137e4f478bc16cef5b0b11867f4d348840ea34bff",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1d8fd0e6645d939cf0fc5a67738039e036f06c540efeb8a18bf9fed779ddb40",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e70e5de9259cd000c3c58af07da1047f5101929c2cad733c2467096c2227a2dc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74a386f6583f065cca5cd8c4b256bd500cd02b1daa82d1f8b23c213e961e0662",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2292d5f57c49a6845bb162adbe5fc94006eeb421c0022e65c3194930f2999d7f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96f1178c9a80528ccb2681b5b0f5a8262eb0861f65cc27dea7831e49f26fd43e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cec5e777df87e1ef7dfac426286cc5a26d3ed9bc8d7e4e3a8c307f6d670b5edd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d47932fab9e40019bcb0a06d39d747c1fd6e79147de58fd879b391f6e88b5b43",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3aed68dc0d315a161453b95ef5d5fc2e386fe3569386bc1620128bd59e955afb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d47932fab9e40019bcb0a06d39d747c1fd6e79147de58fd879b391f6e88b5b43",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "641d72a4f0cd5bb640de8ed043befc0cadcf9c70cc399f0a1485483e32c35fe7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c2ce9a5a95a2c26bc66112eec85850bb0c06252fcc7982bb02b7faa70de01c1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8b67d7d2dc8c0caa1b89de5338dfabcc75e6480ecc6cd92bc26da43affd9568",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "40a6304f5e4cf6e2baaa10718ae657c1ca67bb1cf127bd971b2a438d6c64f215",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "510078001c894619b1437c45e647391781d9fb1a17bcff5cb26d7939a4970a16",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83efd43f61060d2f160e5a2fa8fcd185029672112068fc103f0e35ab384bb8b2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44b26cda219358db0bdac8d4df06f14be1405c0ec75c9cd066b79a84fd97990e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3dff2ca237b634d4731dad7c812af330acd802a9aafa126c1ce623d80a2330b4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be5506a46fa7e9d06827fb16fcbcb51093d07c4e50e476b928bd285e4d3d6d60",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92b075e3829177e0a0163e38e7a65f108084d520ac1d4f55031c4b574654a7af",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7d2e4563b1a21ad4f7bb3a0d0b6eb26ab6f9a51e86dc0ce3c7c44d3458b85db",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd40f6ab072c5b9d5c83d83595fc6a6489dfc8ddeb4c470b01d8b6b3d539b361",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64f6e20d24c3def6bae446791f056131c8d272d6dda5f354ae63bfdc415e0819",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d605d18b50ff780848a7647ab0ce8170fe8e3904c3a268ecf1f185aa33db7796",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42190d97acfca9aee10d326ad2453f81345598d582785e90975e3ebee4f40884",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d1111d58412ebbf4e3cdb9e7aa784b73047b602f90c76c9151c6d4230d99f933",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bcc713d4e0cc586d7f5fd383c716a4e8f3ac78f2bece80a8094afdb4a25be897",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ee9f476b10468f8c9a116254fb6687d0fb6cbb364fb5e6e7bf3bd490bbd013b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b625673c8e7b2a5dced6117b01aec6f25452246117746bd23450dcf389a61883",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_constraints/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b34f7e77b6117dd66a49957c17656b22e5dfa444d8f93af2b4e1d7f1450a5a3d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2441ac1753320d2cd3bea299c160540e6ae31739ed235923ca478284d1fcfe09",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bcb3221a68dc87c7eae0c7ea50c0c0e81380932bf2e21a3dfdee1acc2266c3f3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45abf38083475f7c347146e289533e59b374dd8735446a71f301de517b031375",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3a79997e5e6fa57f17238080b4bee234aa15cf9f978f37e99be60765711640e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/vars/Alpine.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "659f8db0e03052dde7c92fc94faed130a80f8e56e7ea4d3b6bdf1d1c14484faf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/vars/Archlinux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "324b75fe99cce7878ca4b253ef023c4341fe9c5482a371da999e5ef37d2a24bd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "85bef73efccc60527136788d1e9599414cbe28283c1f2ef50f493ce4886e34bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_rundeck/vars/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02353942fe24748ec6c1bf247afa3ec507e28954e28b6dcf60264b4988bf7c98",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "488ff8657b223511262ef52793496b9efca1ae9fd468dedaa9bc48950f73978b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8a550631c3cf2d8a29d3bb2ca1fde7756fe733a497220a4ba47e5d3192ea000",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff95c2df57e2d968c47a85a41a0586572bbe3772cc9870e0045f4b3fd83fd58b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/D-Fedora.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/default.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "546288ee457cb0cb0940bc34228080a6f215b4638f1ad5c61a9135cfd68ba529",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "00b7fac9bbd3a4ee7fee27c210992cded4d331dd1cd4d9a5409be22cb91748b1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_snap/tasks/nothing.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "710488192c243e9d7ca534a825549d15d454d53b756477d78bcda7fab446ba4a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/ca_certificate.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47ddc514d030d2dd28b98eb257b690b8aa94abc7b657b43caf6e32e2e5a6bf9d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/ca_key.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0157029faae2207eaec99b67360db8ca46fe6964eb98165a0ca4ac56cbed7ebb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/client_certificate.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1c88ee01e59fe19f497b74f0fb15a6d705bbac6df554d16f2f80fc25d2723bad",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/client_key.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ffc8420355a69fecd60242feb89bfef5517292aa9129ea79e99bb36ffd80dc6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/server_certificate.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a93a860161059bf8b6d065d2b01a5218a7beefdb075fa704e0139d4f96bdb61c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/files/server_key.pem",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0bb0b33983d37d5b6404c0feb969e80d0787331f774d2b8024570133d65851f6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_tls/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b7623cd0bfcfa8b836f0eed7e40c6546f781ea549220f409320fd3517590694",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "10a80fb6bf47c167c0d3546ec8b477a32c8d9a92767d62d3c1d0a77132838e42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/files/wildfly.conf",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8c07bbd656b9d303974d536af56b75593c9b831d17ca17ba7af2c14502b7be2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cfb6a9b45a8f36d652d845b282219a344b7a53c7474b27533e7231a1c736dca7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "847bb6c4dae501f75ec017de8302d70c08bf23548a82058650b1fbd1180cd218",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e26de7ad0b193acfcc863b4342855ec844466c84d864c21da7aa05c0d00cfd7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1aaff5c06ef04fcbcd51df947fd85c94dede66e35d188166a03678720ba6bc56",
- "format": 1
- },
- {
- "name": "tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6199adb74eafdedff83e41c4377b4c778d1c10773461f479c3b63eb2de90014e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/shutdown",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/shutdown/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/shutdown/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "236815aaae155925406da88d56b84bbdf2c51c3cdd1385ca2b4107e761de4950",
- "format": 1
- },
- {
- "name": "tests/integration/targets/shutdown/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811322fad85866ac31f9501f8da0c7072809065397d53c06712447946b746b6b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83529c5d557f459b40be09fc10dfc4cc4e688e127ff456b6aa129b9bf9dd1c90",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ded2d739d28cfa5095266e709cf4cac36227dec931d1736a3feefa264e8c62d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "145304c99af09bfdf3c3cea216e59ebcd872946b2750c1a8ad5f295e2260b979",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83529c5d557f459b40be09fc10dfc4cc4e688e127ff456b6aa129b9bf9dd1c90",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82b3d7dc5692c55c426dd75d19eb982564948283f89724d302c758cb21bd4953",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "296d6119668067dcedf82891126264a32e2173788ea36061bf76bff8396f13b1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/snap_alias/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "145304c99af09bfdf3c3cea216e59ebcd872946b2750c1a8ad5f295e2260b979",
- "format": 1
- },
- {
- "name": "tests/integration/targets/spectrum_model_attrs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/spectrum_model_attrs/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/spectrum_model_attrs/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13092237372d3237348001411f7d1248f09ed49eb37723cbc04a777c57437ca3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/spectrum_model_attrs/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f13674351f8231c9e60fc923652b1ce94b62357ee00fedaf23ad3af677aff656",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/files/fake_id_rsa",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/files/ssh_config_test",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "441dc00ccedb1b0ca62ecbd9c59b32154abb321c558e19e0a8ba04a8ad213e34",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9fdaa5df59486f2bdf7e9e56a9136b60182bf48bd326b62d4d28db615ec96c95",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/tasks/options.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "abecc6dcf4a246368324d00a9f981a0f5b3b5b370ac85cabd0ae133cc9dfb93b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ssh_config/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1880a68a324959394802537865a680603cbce4018675f70f6329281d365d96a4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sudoers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sudoers/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sudoers/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b9e883c9eef20446e3a3e460c1b4c391680e57f06dc6459a848abac06462539",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sudoers/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5a8866bb35edfbfde89676bc1698a7dd27c89530da249de619169892e7482d1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/files/sendProcessStdin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb8184512251663d37e1c79ba14c7d962dbac0cf77a1af593456823c223e293c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_Darwin.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_Linux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0fc88192e6478dd7c0224de6281f6a690a753ffbb6df1d4e114a7e3034679e27",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/install_pip.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8193ffa8c321692d3f855b32deb703157078f27b7e661207713fff4e3f10322d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89d2fd307d63ea189cba1c12f93e7fff2e03e6abb68bd0efcacb0e5f77a1efbf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/start_supervisord.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7761c3bde37c76ac85bcc6de9bf55467f8942084209edb5f52d9cfdd9af76a0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "adb0abf951962116ca6e1843f1721b4490170b61dfb1a3ac4ea6f4aa3bf5f706",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea89555b19dc74fff88355d178fdf021dc33def2f6caae51f2c40b029e897b43",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/test_start.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "40c50eeba50d448a234a6c7fb6c769c813c9d5b6041a27d58528b2e120efdbe8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/test_stop.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a2d240f9951ce2c4bb2363a6d70e5038da5607af86055c08cbf064034551dc0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "151565b1b6437c5330d7b1619da7dd7ed96393e5366d7eed6f6bb023ec0d7b90",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7760bbf840a5a5b5273e5b518ff7d4305ebcbea60e3de38af307bf2bf798e663",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/templates/supervisord.conf",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "877476ccc7f51038a097f2e52ac552297c387e71c37a25d523ce6f551b6e9591",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/vars/Debian.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b73003c59b63fbb82a349cc0693c5a352b9a232ff520bbde67f0a76b947c909",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/vars/defaults.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9a0893de74514778586ad0c0240c024b26d16c0c8c4c7eec89fe89d7a475b752",
- "format": 1
- },
- {
- "name": "tests/integration/targets/supervisorctl/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b268603706f3146929caf02b9766fd864cb33d10c1be9d581ef762640ad0dc26",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sysrc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sysrc/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/sysrc/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd52e39be13eaea8f94a65caf2c9bce54bc0917122ec860e494daf41b91d100b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sysrc/tasks/setup-testjail.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "711db7c1d698d974d4b3d58a0ecd3beaae49aa9dfea4f89e9e305c45e28dbc06",
- "format": 1
- },
- {
- "name": "tests/integration/targets/sysrc/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80fcac1bf73988cab479c3db85a45b2bcf71260e19a6e691dca88213b7c83e42",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69c0cb85c493f4a56758eb814a9c36104cf36c449a0e54d1a6b4b72bbda01ec1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/library",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/library/local_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "853a4708a3d35eec2ffe537982f938eb947da2faf1b405a4690b5b6a2ed5dc7c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a5657d4075cac91b7e2fc1ffaaa04d37136cc64e51c75c2d261147519bb12d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/runme.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "737ebeb3884ca0395d497b4d357dfff97dcd1463a494b2fbb2e8dfaaf1b4251a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/test_a_module/runme.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "566f7be489b33ddce7b23f85cbb3bc4793cc0317f72ea634af8eb40efb5a199c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2af6405d42a5abd438f4a256a6a5c19b8fb5eb5559209c0cbabd150af911837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47f761013cffa238fe7683007d036d0e5a25fcef8b473dfbaaf1c26ce5f20265",
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73ea1b70fd2b5411eca231acea9212ac2e2a0a3eb2ca93618638bd88108bfb4f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/timezone/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de4b021a7ffae564d5e878e8a94a4121b367cb3b51a9b0dfe2b08943cdafc492",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/tests/basic.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cbfd03a4a6a79672ed38e204abed663ea00315e59a26d7d7b5acd166efc16de9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/tests/global-state.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6adba710aa58f28cd81d6a0e3620c2fc38587ef14b3e26a85f41a7dd2814b20d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b28169e97fa8a69653ad91f5bc21cc746d26c1c310170652b5a94d9161fa6064",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/tests/interface.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3bc22ae0cc6f0384c2f29530b9cce48be2fa992ca72367fec2887f367f6899fc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "071fa18e8ee40f0e0aadffab2ad453eba19945ab310fe864df2b478e3006ad9d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/tasks/run-test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bab5dae8b59202497a94d3a13d9ed34aa200543b7ea8d8f0cb3a24d16b115fee",
- "format": 1
- },
- {
- "name": "tests/integration/targets/ufw/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a8fe3f6351eba91318e83afee3686003438a97bf10fa9d77330e99742a5445d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/wakeonlan",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/wakeonlan/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/wakeonlan/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b3c431835de84d4f4f33d43d8e3a9ce9fabe19ff24b0fc617876c11b7e97208",
- "format": 1
- },
- {
- "name": "tests/integration/targets/wakeonlan/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ffe208f20c1cb038da9750e438e2377f03d31145867413d919e6a025c15d270b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1d8dfee07d5c920e531fe96a06e527189deaf13fd4d684ea513339a64dd29a1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769fd1fe7bec847f2f097de2fc87ddc3062a542f24ea15a783fcea9e18b038f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6409d500eead57967d4fd868cb5852adc1822fe58bd1ed0f92f2ea764de50c54",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/tasks/setup.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8fd614bf34876618e9ca28dc4c49818fdfc0a7982ac0814e28df3741af5930df",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a010da6a9ac6709b5e4fb53ebd960462e0e8afb9d5564dadb4dc013b21b91c3d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xattr/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1fbcab4b8d4b681f9278736b73af5b7e26c18d133f4c6df700158b2be244904f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7fcbdf40bed8e4180a0f571f5f979872d1aea52c476a80994e7f4e3a488e9225",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/tasks/gquota.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4f78ba14c8dd91da0a4fca770577755f07aeecbad8df8acd5a96b7dda65c05cc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1f3028e8eb861d0a8a56a98f1835b4bbe784726482a4dbeaef5a2eeedb28f26f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/tasks/pquota.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5144a0c5a5f2fc1f0181a699949544342979e6d6e20668070bd263b07744747",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/tasks/uquota.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d796e8378720c441626f4f9dff7e13f5f7d7aa16e3acd73a4618c01abd8e289b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xfs_quota/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2726eddce66cc8903ec22708217894083028934ccc9874b779234699c822298",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/fixtures",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b88157804ecb91179f87676a83ba7980af70efe935b17d39c18d05c298f57cf5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c66414fe6d4b64014dbf96c994c07cd97b38e03e6f476cc0d9f0ef27ddc96df2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "63dbf18a99b1f1eb359e912bea594f9d2450438068213158c145d3c815c9f0dc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae816995bad20b4c09762e88d447ddf2793e1df8f6d61c0fdc03a7432a06e75a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-elements-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "18833a8f2893edcb2ae267ed7f0580d06475e7da070c4eecabe16435fd98b0e8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-elements.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d479f1699c9dfed26820f218e0642689c9a7b75f9df8a49d22158ec117f0a43",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-from-groupvars.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "463714365dadbb9920967fa83c913702ffa1e5488e86624456970b4ab8928b9f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-insertafter.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f12f8469913b495e3138ad3207c4a228eb99c584b016021afff0ebd565033e36",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-insertbefore.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54632a063875c7558eddb674a785edd2ae3d6360d0988912ae3ee3f50c6f0082",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff7167bef3d711a8dec1572ed1128746ea63cc69ba51257bf59b56f00113846b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-children-with-attributes.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "adc4d6df33b14a6b2dcbf9e2df9ee41c12f31f2690f7b198b7ee810ec29329c1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-element-implicitly.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2f51c7ddee9d1cd6e1bd7ab58dfca1bd58d56f1a27bd3bdecc49428a6a58778a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e50992baa59f7a611e4ef08211dce8847618ecbd0b786fc01a17b41330405200",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-pretty-print-only.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36e6ffd3c5397494980ebfe9771ba624b7d920e3ce3d7bb843f364675fbcddb3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-pretty-print.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "824b2a646c1c901e70bccfb7e1ee63721c9e8cee7994133dd166178d53e67065",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-remove-attribute.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e38225db2b6f8a1c485a328ad08a8b0c24ca3b017dfa68366cd609991b9104f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-remove-element.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ecc4e61ff85be0098aada5efc9e3c0d8424c98baff4f8901d991ae08c08416f2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77a5e85cecebfe2a8fc432d8bbae1aee6b313956f3f2c12383310ad151b6fcb6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-remove-namespaced-element.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2571bd28e49e82ce1b494fd2162f89bb82947615a9e775a1f7399df435f3e19",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ae94b872105fd18f6cee5e0cf4b9b0527757a6da94e52638f2e492dea88f2034",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-attribute-value.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c25c127ada5a292507a55c36919bc801edac4bcd6f85807727e1cd76e1e5cb4a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-children-elements-level.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91ad02bae7452891174b2178d2f6556e0dfc07a5e8f491d98f0e11efece9b1ca",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-children-elements-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "600316e3ef64bba85725621be258991fad9477b00b25422461aa59e703363291",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-children-elements.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e70378485034552978d9bd0b525729717e75c33c73e947f4c02779f6ca8a9fa0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-element-value-empty.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b492c8b81778ca3793f788cdc189c07f9170e4b75e63b49f09768611b2953868",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-element-value-unicode.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aec28ed22238a57708d1c7e4b7416e3fd64b91d7ea679df3d511d0ff4d78d794",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-element-value.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d68db62d1a5fbad0082338ef7f7743ff4c60e1ba452d507496a0df95980060b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a06edcd61c987b3076806449dc923591867f6a38b059ee1e9582a165f7f6fec8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/results/test-set-namespaced-element-value.xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5e5f1d03f86bec1b16af94fea829c2303a1fe38050cd7453de87290e7b2d0dd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b6b1416a0a45fcc8d0a948ad10fc71fc24801dad272ed44be6c585708ae997c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0f0891aca5c0e0b9951ec8a11f3ad5b90fbcf83ebef449808119d8a6cf370be",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-elements.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89a2e6ade7c9a228edf872e867ae671ba76eef9395212e1f12f5520ab6bd3f0a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "14216bc42837eff570a9c5e9296356beca1ca001546b7a0a150dd1f51128af89",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-insertafter.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f24542f28b9d92973666d0c144e787f0fc0e328f9aa7fb2b9854e2a851a0cf51",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97be423b5077229069a36e3aabf83c2eac677b1c2d634fa6e4f94c3c0ce988b9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f1854586757f483170a4dd5e866f3bea185499b3c89babae051da70e585814e1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d089d03f6f644ad6a6406a80cd3190acbbf4e5e518883264e30c0dd27180510",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-element-implicitly.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "84bbfaf0c30b5f06dc2b6e8b59f74acc6c239262af8e5cf3ed7a3a9fee63ce02",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c98aa13d97b97120b17e3c73c2e2ad16b17918393241788ae2f91483fe02a755",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-children-elements-xml.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e57b3bfdca4086e6518a93ce5043cde55573df0d2143eb6013263ea5b5b9090e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-count-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad8e481bfaeaf23fbd794eec46f28637ff748b6b012d59f316bb860897148158",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-count.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7cbaa1b1e1c4b2aedffd7b185541cc20d61ba770e7ddb068db1371b230327b8d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3635e6d9ed3c8735c69bdd94961c1d553ba78428f041f297c7017eda55fa52e2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-get-element-content.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8a1b7f7542375ec806a24385f2b73c9359f8b7839c115b634a9b4430da7298a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac7da5aec1036eb83ead426dd96ec6d31eaeccaf842e79cdd5a9ef74baeac172",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-pretty-print-only.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b5e11acdf67fb9c058cd2702505465a988e4b52b1cb3be7ae45adda1dfd3395",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-pretty-print.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "464e1ebfd5c4e66743097b5e73ea130595c6c0fced413a52250598ae47aaaef6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f32305bf4e3cb3b859cbd431b871017a5ef8d2240c5bb4084f34696ee1032730",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-attribute.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2674349d50096cef7523abf3c2e6ba919b11d14d93a2b0c4f0fc0ec8704c2440",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-element-nochange.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b1a3ed2ab4af9dc63189ec807f7b468cc2d4b80ea3b6421e1b71127a3d5b5c9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-element.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f4083387b79d4c7ae030d9027e4a7838d01250f783290f0d3a88542e2c52777",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c56834fe7ee6542e55b63f8c869463c749891d2bc5c496dd12d568a4d1b3dc7c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a33854d8fecf679f3d76c642fd1379b3c844dbbff6329c44033eca5eefb16e79",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "393cf5c8c8d8e602289848e184f51f57384ce3543ee2d1f2a2e2d79e3e75c6a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "811d231361ffef9be6f36a8b3fe56e4941b50178336a894a0b4887174bddb0f0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6cf55191c2f195079fbeb6270c8b4170f07d8653283beef8888a5e05351448fa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-attribute-value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47198cb89249a2c4ed003387b1cd9887ffacc925d42c78ac3a6a0edb15d5af77",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-children-elements-level.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13a9de3dcb2c8f6b8b737d77fb2cca32f98d1d8647cb85b1309e13071e461952",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a38cc08c8a9a3bf53564a9955fb3387fa64a4bfa37c9d79d49b01297b614b562",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-children-elements.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7b97434f0c2a16a4aa3ad6e51ee2d6665f68a2acfba75f30bd793f660d4033d6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-element-value-empty.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "306e870f3299ef224e8888ea3715fa0dc9c69942797fe4486ff03d734d00cfe8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd98195dfb5a4f33831ce14e47e2ac74cafa70282c1f6b187baec61fdddbe6c2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-element-value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a335de101750c51dbcf94f68764561d04f2a0907ab21ae99ce9a3fea43ef030a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cdbcc1db25334596337622fbdbe457587be5eeccec5fbbcc84f4cd9f925c7f4d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b264d9c3a40cd702c036714a2342ab876615252435a2e7edb58c90d924a38f7d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0e7d085bdd8ca410bcae7e7cce35b27216509c24de1136f86e89a0e23e6e08c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/tasks/test-xmlstring.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11ddcc6fb2e10c2c7f38f95585cf69b4c04161a1a1be1d01d344063ef8bdeb9b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dfa0429b7bec0c9e705c2b649cd7a1c5a3a9301984b6b61ebb93a932acb0b60",
- "format": 1
- },
- {
- "name": "tests/integration/targets/xml/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6e6b3eab89eec157e047b733c9e9c8b2ae7ec87e514ef9057018fee6fca9ba2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8fc45c0fa92f9e855eaef341b5d10d9ee2630f93bc4bd041a943aa8c1169b3d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b265495436a6c3f9542ae62e07d8178e4f24705abc5368238169aa87f30203a5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/tasks/run.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69cedca1e383fb004b707ed3b1363bc9ba6ffd06cfbc44d9e047c8baa394a4ba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/templates/package.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "664c8935b09b77dee0b4068cd201aa26923567264026632473acaec6f90fc4b9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yarn/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4e99bd95e5ccbbc3157a4036b5a91bd46bb22457eca85b713b441cb0d4f0b9e5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yum_versionlock",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yum_versionlock/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/yum_versionlock/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a9c743574545c7d170ccaf37f4359e6d922a6b62461b4e63389b84826290db1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/yum_versionlock/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c1086be775ef488ec8b2373381fecc0f4383a03b80abb70a520198fe460e16df",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/files/empty.spec",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77f59f1c05484da8dd181c8158c7ac48b5540a9a308c5f3872c52960c6317450",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "978cdbad8d0ab5434c81bf5ebdaa7b66b1b99388a742abc871dacf11709311c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/tasks/zypper.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "956d4cfb98b14577dcbba4615a22ca3adc54ae919f3dbd7f4affea1ffb1d8d60",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/templates/duplicate.spec.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5401528520b9f3ee1e8ebba24e66ad649c2e95130f184508d023b82be001c7b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b306054fb8fa3adc485920c54b66b47255846e7cf279569c53a03903b46fa4b7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d632f9db7209967c5b2f6d734bede09841acc7b898dafc19f31c72cee9929",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "925247336b1a937fc2d08e3c873d4598d214a3796b51da04d3835321bc41ce30",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/tasks/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f2e1d876236ad6d7016d9b17f72b16d759b6860927fbe4ec225531ec83667ec4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/tasks/zypper_repository.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4c5ee78440146ebc0bcac0e6d6f920a19ef05ca479174cd99b163f647ee136bd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/zypper_repository/aliases",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d502a4aa4e2eb90aa8ca5b3574abb191b74f24f1f4d15266457c616538367c26",
- "format": 1
- },
- {
- "name": "tests/integration/targets/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/sanity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/sanity/extra",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/sanity/extra/aliases.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d825699416551c4276f44b44c20aeef37c9b357d7711c55cd15ee12dea293907",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/aliases.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f2116ad77622cd360af4506c299d64679e2346b58f03359a344a871ff8247b1b",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/botmeta.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e56a83a42ca5002a300003f6ea560a036c684768e839c228af08ce501ac03b89",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/botmeta.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4de513bc44d78d4805850530229cb86922f5430c8fa276d0c329c4430f4a1fc",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/extra-docs.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48c2f15e3fe2e2bcd9b8c8bd7f5f1643d78f16b822d63befd88795fe29bdac3c",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/extra-docs.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2e5e4a1b1fa91ad02620188230cc87a7e4f89532e572168590dc93227050f98c",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/no-unwanted-files.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac",
- "format": 1
- },
- {
- "name": "tests/sanity/extra/no-unwanted-files.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15053d269214d7c1427fe861351a3bca96cf1ff9026f8aa8e8c73ba5f3cbd95d",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.10.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f0361a30842d521f9fa3377121416fb10cac52299375d2e145295ae81e0af56",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.11.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9261ff244e74897601beadd8c3baa049e8bd209371b4524acf8d21990052210",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.12.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5d301f91150599b4bb12fa753980841c5f3d2d75dcad6bb60d19066c81dc1d78",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.13.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5d301f91150599b4bb12fa753980841c5f3d2d75dcad6bb60d19066c81dc1d78",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.9.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "84c5951d7a09b87481a326a1ee032cf759090ff113e04a8716db5ce930db7ceb",
- "format": 1
- },
- {
- "name": "tests/unit",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/compat",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/compat/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/compat/builtins.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ca4cac919e166b25e601e11acb01f6957dddd574ff0a62569cb994a5ecb63e1",
- "format": 1
- },
- {
- "name": "tests/unit/compat/mock.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99",
- "format": 1
- },
- {
- "name": "tests/unit/compat/unittest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096",
- "format": 1
- },
- {
- "name": "tests/unit/mock",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/mock/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/mock/loader.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3452ac615f89c99a76d1df4ab1ad84d1aff546e5b5fde18034a241239690d05a",
- "format": 1
- },
- {
- "name": "tests/unit/mock/path.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f048a12629a6297a676ea56529ecf766cff30bcaa873c6659ac5b7f6e29472b1",
- "format": 1
- },
- {
- "name": "tests/unit/mock/procenv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e79b2fe520af92318da175c231296e16bf047842a93b1bfa4e1a5afc453baa03",
- "format": 1
- },
- {
- "name": "tests/unit/mock/vault_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0562db7b9972e6378701e3492c623e5f881732c4792e096032b72c2e54d22298",
- "format": 1
- },
- {
- "name": "tests/unit/mock/yaml_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd95a4807e52e9123a8d40132a5f52b75cbc1496e1a32b104b2655bf631cfee4",
- "format": 1
- },
- {
- "name": "tests/unit/plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "917507083eead1d34596d6b31a4a3600a780f477bc8856ef326c4b18a1dd2053",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6da8cd096bd56d3f206b879eaaa93fd1732df17ba15d10a524549df46185dafc",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_doas.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2bdc1de37449ed84ce41b44565a575e8ee619a055ced31cf62d2c55a44b64f99",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_dzdo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f26500ca45cdedc6a217cdd18e0a1fdfeff72415c006cf78f0f4c25476b98ff7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_ksu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b7b1b2f3a6e77846a3adab6f323ce7cbcdb0ce65fbc2d4bc8ae66f10e8a8a488",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_pbrun.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f581f310504561f10a22a512343d2ae213e0d73eed950bd79fe35916f56589e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_pfexec.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e7d4ccdbece51e1c3426e58225426cb3bfd5c6f475243f9dc9554a4a39f2509",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/become/test_sudosu.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "268e56de2b8fcb36c79368ae1a72d408d845262fbceb7c6bc65844de24d30b50",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/cache",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/cache/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/cache/test_memcached.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bcfc3a29068b2600ce1ba45b5b9d1ba0beff9e231b6ed491d17eb09f37eb56f3",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/cache/test_redis.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6175849033bdb3dee3dcf16c0910361ded349f0cf7ca73f29e819d145864d020",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback/test_elastic.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4ce3edd28ec0cf7c37fdcdc9e4a63c8d670bf4e66ede233df388e32c46f673cb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback/test_loganalytics.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "766e354e8049ff4e256d02d9f63baeb97b092bee6641bf8361e6c239f57dcd86",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback/test_opentelemetry.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a176001eebc2a4bfbadb591ebfea86e2cf1fac7eb269e5e6a893b196c81cf3ac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/callback/test_splunk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "305ae4d1806d366ac83bb9d03418c5073287e973ddf481264a70fdb781a68623",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/connection",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/connection/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/connection/test_lxc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe90b5d5eb17eab987cd0022995eb85b6c8f0e90d20aee7b8fc0d6945041ab00",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/fixtures",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/fixtures/lxd_inventory.atd",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "162213d31385d92e0c3c9eee2f02189646a384b14132e7a3952afc23ffeb33a4",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_cobbler.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97a48c825926b35f9c32303d7c88d0590a43935f2749d5de4f5b0b5ef721d444",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_icinga2.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac2ffef2b6843298fe98692a3631a101e4179c59c64306be4d356d9f99b8bab0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfa9d219ee27513f7adc5e2e27b57e82e838f5fc473722202160456ba5e52aa2",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_lxd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e1fc38581497412ecc4e9a4d6662995910edc71c7cdfc5c5f0e03b03ed9bd7e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_opennebula.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "62664bab411df8055d9a52e9c2dc5033c23366ea857758603d513c995e4ea9b9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_proxmox.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ee244ed220f06691dc75a5b302be5461d124073fb4753eafa6ef9d7b40bc070",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_stackpath_compute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "becd4388974334f46b58a19ae404eb50649fe9acba9f3bffed7e6cbf2767f97e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/inventory/test_xen_orchestra.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "804e1802b18939d5b2ffe781a5e150b9b0b4c7ac70a61fd3364eb433aee16df0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_dependent.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5caedd0ff8644aa8c62c6b98c8ae0a66d905ee2f20666a047592f7b10171ab4",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_dsv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c25e7fc101df073b45b7eb7b048d7bcd8b3f477e6021c27dbe974945fd8299dd",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_etcd3.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d79104d0074682c5308648f66eabf50569ddc34f32ab916a02a4425077043083",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_lastpass.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "53b61e83a357f3c9bd28795c8f16238a55f5cd9031783f06f00785d50e02dec8",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_manifold.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5d2b651cf2fc37f4f8ff632aea0237ac7fb35ac5f03a643d7cffc5f1ed0db2c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_onepassword.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97052edbff97b2b30d11ab8b147fe733bb382a02db51b4604bf0407a01fe2ef2",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_revbitspss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "609ea39bd85c2575bdcb475c2a26ddc85e119bb02fb9f8f6d16a42d4e6aa853d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/lookup/test_tss.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7a725b4c2a4e0d32f67d3a3b19bde98df85146309e949842f423db5c52c9dc19",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/cloud",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/cloud/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/cloud/test_backoff.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c43c5418bed4f056752610c28cdc7b9ff535a1c29238996444a21fc6b47502c5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/hwc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/hwc/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/hwc/test_dict_comparison.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c844c6b0656694a4828278a84f6ebe4d6850f022076d86aaf3b68c2fac685311",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/hwc/test_hwc_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0fb975491eb25d8f8e74373095a0cd87e12e1a7d6acd4282b1aa1101142f2b87",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/identity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/identity/keycloak",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/identity/keycloak/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e0678b4f3b5e1a84586ba094c97498c7fae33ef3dd404c736058e314b62b075",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/identity/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/net_tools",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/net_tools/pritunl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/net_tools/pritunl/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "726f4d91b7a2f95d1bf59e5481c2a8e46ce79790a9d09c0c39afe2c562cb02eb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/net_tools/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/remote_management",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/remote_management/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "58e21893fa65459c9869fdbcc9c79299cc01183e3a10cf575cd75a62ff366e58",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "61a935bdae44191686b63826996abbf2431834febaa54e4a9e523aec016cdd61",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "728bad77904f8e3d2539810fd0dfcec6bb24621c78406daf4434dd611042da5e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b994736a7e80d02c759c7b19977101c0c04ebc1c8460258f5b96f595b9daf037",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "92c62d837dcef25a817ac3a9903d6a430b0deb44848d29ab5ac5bb6eafcda526",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2a8de78de7ba45268294a48c99a82a957ecb3da299ac9036264308392b14106b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/common.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5beba741c380832f920ce31d44c0061e44cd9301469262e080d83336713ac65c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cdc32c1f62b6cd60be98a4b5531ab3231d79055650df266371420afb052c1f93",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19e5f84c0f0d0f1316516689819332972c3f21b6175f6e9748a5319b68e5a2ab",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "51388a0d4511aa3ab1ddf74b2ec0c603ed46e88741d90b8c202725d7c303b89d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_misc.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4e7c63d3bbf78b71353572f6ee50a49f633a371b9506523cb5e9541df82837c9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34b0b4122166239da7c963580b38ee3beb3657815825b57c8c37349fafb55cb9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5539b93046d0b10ed2823aa1d89efcc6969c154d00b4a92443157f6e4ed75313",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3dcd632f7e357295691f1dd4f1c5ef041bc76b28b3362ab91aa1a8b2be8bca08",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_xapi.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c32fee1f92b6049387e5af9a600b80e302cf08e3831a866af986d70a44700237",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c7d8c81a73c9ab1c69a8e610c788a41f2817a56678475a1e36267cf8037b5a6",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c50d5804d586f18df17d15e48332dc0c78239283d0becd6cd7eec8ed8dbd8032",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_csv.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "434a6147e2e3d20fb0c773fa83bcb76eeab520745158c79cbbdb021fca271b64",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_database.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dcf067c65ac448adaee89a093902592e7b79a3ae95b5cf47098cd729f7912727",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_known_hosts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "856a89896c248a26157903f168873c8b575ac208c15d4b7071cbcae711ec51c9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_module_helper.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7bee762074a200012e3f7613f9b9bcd778947aa8cff5c317a44dc32bcc2f9bdd",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_saslprep.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1bff0807c10f8d51746ee02e3e25654d07004f4b35cad01baacd6b24f3d342bb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/module_utils/test_utm_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac7a58ed47d4ba383a871571bfbbb8e447e42019d555d3c5ccb5f31b79510a33",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/linode",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/linode/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/linode/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4cb56f4daa028f7157bd3dd75b46162c565beb5120862c1585f890d51223209a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/linode/test_linode.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c1b4d4e134d45209b594a04eda78afc8c7abcfd0a25b61a4c34137db0db6adf",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/linode/test_linode_v4.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "148d277d41d7dcec822fa651bc589a72362e520f156c201f979d84768237dc4f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_kvm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e2c49840bea31374cd2752508c1738eb38cdb3778b2b0a7bab83a3d87469d210",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_snap.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d841cbd18de08fe6679d1ad0a27dd7009e9de0892a938a3ac4d0d0f652086535",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc/test_proxmox_tasks_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80d995f32b1d8b030952739951a4167f997d1a52bba989650db01dd0f47e8a32",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/misc/test_terraform.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9e60bfd9c9c35e83426606b13a75e8f6d2adcf1245ed1feae7c3030810061bac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/scaleway",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/scaleway/test_scaleway_private_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "312a23cd0fda058815138a619db9141693ebbec38f7402acbec607af12f431ad",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/FakeAnsibleModule.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8249860417aebbb656541d50db86e9b3314c58fd6608aa4cf87b29cb530e1cac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/FakeXenAPI.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cfe4d1e5778e71f0beacda11226b934635d87a2194be94af275d09701f501e01",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/common.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "958eafb650d09fa883cc9b3d9cf14d493723d91dcc1da9b9ee57c6dc70bdd57d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6cc2b211f0a74a9ec3994c584d26b423d3b9cc6671eeed0db7d8821865479d58",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f2599c3f551c4736f1e09629c7e2fcf44b6c8564022bb3dee6a8df2b728ba29f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/xenserver/test_xenserver_guest_powerstate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b6ed5382c32dcf082bb72b27c567214b0da7943fd224f54a674d71142f7b26c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/cloud/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc/test_redis_data.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eee39ec82280d3f7022bb7ff3e1185a65fddcd61e2c12609aa23ace15cbc3f45",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc/test_redis_data_incr.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96bd3dc851896186fee6461b2efacd33d31f941f44e31467023bf2eb92928973",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc/test_redis_data_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23635e273c72e6ad03076970ba14374d1e6d8062637683493ea2fd3066607237",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/misc/test_redis_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aeac4234cfbb8535ebce9141aef81fa21dfc731b5ee925776b8cae12cceac44f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/saphana",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/saphana/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/saphana/test_hana_query.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2e810f7e83e330a5b02b00f1ab898d54e846e299cbbdd69d08109030e9d735ba",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/database/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/files",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/files/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/files/test_archive.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "109fbb9746b3e00b9acb4e3bcadfc89dbcf52d969ddd9227c481e4bedd75795e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/files/test_sapcar_extract.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20f63c642d96d97ff5ab7467ddaf2b921490455bb369a9fd771d6fc18131cf80",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/ipa",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/ipa/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_otpconfig.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9703a225774b826b0f178cf8781bfbbdab4cbf7ba6f37433f00ec4ad940fa2da",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_otptoken.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba517f3a51db4c0497e2a0d0a91d02e4e7ad847d979c43befb2fa3f7374833a0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/ipa/test_ipa_pwpolicy.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ea0e6554df05d0e32e427eb2b3e0667140c02bdf82d3719da32f3df5001e402",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_authentication.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "73a85a14ac98adf5769bf68632ad478fdc67a74b4cf4c7fbe0116221b367714d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_client.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ada64a3590eb467544b696ef0713e039af89258ee406bc83ef7ea975bc7c4144",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_client_rolemapping.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "456032e70e0d94453cfc8a786e966b2f8e971a94e0b33a2ffda7c2cc8d21891d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_clientscope.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8feaa49ad0f2e92e628367565d073adffc3da26b90c2256456cfcb18d966842e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_identity_provider.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1cae35ac5a12f439e22fd06fb4c182884d077d837111550b07e80dc45b83f5e8",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e65ff0e2f5cdd49b8fddbf650cdd4de13bf022993f8dab53a23ec7d0c0494b7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_realm_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f7a24d3e87d2f95b5d0de1ddd935525ab773af36faf7e0cc22f32f50214e95c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_role.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fad6787ad91ea7534129abc9bcabfb946f769b03241f7405640610d3488c4875",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/keycloak/test_keycloak_user_federation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "31d8a0b3e8230e4b75fb12566b200ef97bf33e832a4a682b7410063fc0aef6f1",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/identity/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/messaging",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/messaging/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_circonus_annotation.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3f8d99c1af7336345ce7027f2e1d5055b156c46fa9aa98241c8bee6619bbffe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_datadog_downtime.py.disabled",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ce888d2663874c4fedef718c0e555adfa305452e7e890757c68ee739f9e3609",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_icinga2_feature.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "784eaf2c4bb9687f5d501e243a92c970e6332682c10e223c0199f501d19ae05a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_monit.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89d4b9fc28b1836ed677ab50c2875b57c133da78fea0ccd33334aadb644ccd7a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_pagerduty.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "51231e2644a421337619f184be06832ccffb8fdd9bc898989a18951df9a515c8",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_alert.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e86552e30a2074ae8275eb78b2e19376442b9b5c3d59e99b2b91119678875013",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_pagerduty_change.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c1f5661bedd93f7a7119c04fe3f15629fe77d21b27b3cb2c3f8af72fa2eb2f28",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/monitoring/test_statsd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9510b952fa3eb04470bc26387967412e9357dcc7e833533fb0a4ebf7f65cec67",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/pritunl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "080e132c436f7ab1194c69fcdffda7390c5b162f2f2e0aa7c010ab6862bba9bb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_org_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "482ad29c2981c7443a1b5e7726c867ab8a39bd879549d2486ea5bad48b678643",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2c8fec7225e1d485276b68f36825da0f954862626196fd6c932291a32533e497",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/pritunl/test_pritunl_user_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "414b651d4ebba223926c1f6320f254a5ac79e21695fc3e16c27bdfb5a632b02f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/test_dnsimple.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "23f506451ff108d1362644d2957ded2b5fa826184f4aed897a95865b352c154e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/test_dnsimple_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "193ae47e0e05f1c385711ddb6e6d3d2b138f712708af2b3748c4a6742e8e9f04",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/net_tools/test_nmcli.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a025f14b41fb7559dc1ed65721df3bf1d5e714a8af51caf564c8a21872c35ee4",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/notification",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/notification/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/notification/test_campfire.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "818eda9c60aa41fce3ed8a7a88df3de57caa7c0c3267670295bd44ee96a0f8be",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/notification/test_discord.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4303a88177ab58c876cac262e1b012a6546d299e0b0d3cf54fc70da77ac4e4a7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/notification/test_slack.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea7a7424df543b70c068f8efb29a909a3935d8f7dced00bcec1e969c5fb334c0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language/test_cpanm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a74507fb1f23d0d00a38191215142359309f6e015136c12d1ddee26ee47d3096",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language/test_gem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0484019fba974202d150ffafd84fe4ef416641854debf6505581c6eade9b7079",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language/test_maven_artifact.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "179825e10a242f386aba87c4c8e1f5740653cd8962b07e37dbd588af37121a13",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/language/test_npm.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "26444c135b62fcb26cd7d857f32c93b49b88cad3575075719b1bb820669b0e09",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "504f9b1ae9baf976f44e4ef62ed7c616129d090b3170571279e1bd8d43409be9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_apk.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad806b3c277e03d16fc7a2506353c46b8574e3cb82504362c6028c33fddc7de5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_homebrew.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1a9a041047a61d0801c467295ebb1e238c530bc7555a1d70f4d59efc306f2549",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_homebrew_cask.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7548ab19616febd1347d9324d0d67723e6025617a012b25a485a7eed5c9fcfc3",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_macports.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ddd9585216de66bc18b9731523ae67ca2ba352070e58b7515b5b11b80422e2cb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_pacman.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24ec8cba0acaf52e7cff3de25e4604fc331ea805095959578b1e00169282fa68",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_pacman_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29f0850e972b152e926a6b51cc3473b9c28353b04e59f0eee833d867ad687410",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_pkgin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8ee6102605e34a4de8445118f42e2caccbc164da2672b27ccffe00c72a3f8b1",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_redhat_subscription.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "983aa5a904863d04b7a5e0af396c17e742131690ed55d7d26a80a8800b255cb2",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_rhn_channel.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3ee90c66745027bbaba686da547bf7d1645f975be32918dfb5e3b7afe69cd71",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_rhn_register.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5a6f919936578fb0758385bbbc0e4084930c36985cd808dd9bfb0edfd0503eb5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_rhsm_release.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cf69e341046ff756177406f4964be17c84d38242336692e69c08da324b2955aa",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/os/test_rpm_ostree_pkg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13d047dfa2a3920faba147c2174bf872f4b5763d2902e860dffa7b1f551ee3bc",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/packaging/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lenovoxcc",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lenovoxcc/test_xcc_redfish_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44b040b74ed7312498b4fff23ebf329f3e1504f3a7558afd3082f4f76bf1bda5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lxca",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lxca/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_cmms.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6dc89465a3f3f8a766411db1bddd2a0b7ff0d2b39bcf392b3c6a6d9707665e2f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/lxca/test_lxca_nodes.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b3d733b956abd26280a2322a9f11c04f52069df59c8c9bfe34af52325af97141",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9df5d55d4865eec15d7799e2f889a1d539954f248564ca80aa3d38efb7fece3c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/hpe_test_utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e0dc69496825ed8bf2f13d3dff2a27ba98685130a59fa971f1e6e0e9e83aff57",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/oneview_module_loader.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02ae4523f1519d32f33f51f51420f9a48f75f32b55dbc6ee9ec3ead164e35ab5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_datacenter_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aba6e5371afc2bf09637d070139007bcbd45a9db409c6540e31e19ca21cd608d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_enclosure_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da6d11c5cacef736152e2289c2621c0ae630f2bcd2de6791c67a432335c77e96",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d36247aa35c29fc75c354e4d0ab45cf689c45c559876b03b3c80a9c5f48ba761",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_ethernet_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e599e39df535672407b42860879a28ae6b83fa32cc4e927bff82ed17ce394ac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "19f461d1441aeef057bd0b2fa9a5c0ca98cc973414accf93cf583bef0f7726a7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fc_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5dd17dfd91d095476e740d32e543dcd51ed436d187fcb4e32e0c3411e4217fff",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9cf8e3857c596a63e055bcafed62b35d05f7f5a2f3a472839493dc3b9dae0222",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_fcoe_network_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "53334346c654b1a2b755bb908aaad43a7691d44b537b550f0ca8f794281ee4b1",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "844029b96cc4dbb5449d89eb357f73e16c0233e557820635293dcb35e1884e63",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_logical_interconnect_group_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6409f79838322fbc92cc554b45398a6d5ee0b6d447ac3107d82806695e11aeb1",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d98e413d2eb2b62cd4954127914838408793b8182dcf2916dfbe1c79efbffea",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_network_set_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3182e532dc20243b2bcee01228fd4430de5bf11971afe051650375ace450b46",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b9c21da4069dd0e9e9d22d07d3969c8b6e0fa638520d990e9c5e88859f626003",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/oneview/test_oneview_san_manager_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "daab0258103b8bbc5fe2a0499857127177ded33f1bb8cd423795950537693138",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/remote_management/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_access_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "094ff4ee68f8cf526ba9f01191089c7691e1bc64dc8d90941b516f24297ad340",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_key_pair.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a2a6ff5f09c328a20be078dcb07181464186ee07adb1b60506a11785046397b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_known_host.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97dcef1a318d02c39e09eba33d88bbd4fa80273d689a8ff8f3ddf3528e6c8857",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/bitbucket/test_bitbucket_pipeline_variable.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "677afe076c6bc19d72d8389fbccdc92bcc46f5e34794e663b81c0d77ccc94c54",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/github/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/github/test_github_repo.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d796e9c56592e0cef63e5e152ed0a4ffe80a102d7385e6e7bd1d8fbd82935b7d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/gitlab.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "74bf2e627f1596b6ca846cde3af9f1abca586e8cfb9f4c72abf2014a7e41a6bb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_deploy_key.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "679b2354ea81dd87e4c9302182ba4454f31f9a6e41176c9681ef7f55cdb4b946",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_group.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "67cc9724895cfcd3dfc98038a102824fa60cfabb0b1669491fba418c4ae17e63",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_hook.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "259547068279dcbb64a126a0fdf1df9984b18895fe52b69fe82dfd18fa5105c1",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_project.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "01081c5c1e073e383475bac5ebc69899745760c46dc178372a176c3e60c2f5d7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_protected_branch.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1461b0b331c386dd5393b388c35cf1dceaf1c2707acc2db8416d179c793be1d7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_runner.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57adc6a8f1f625731859de384e7bd8ea006066a318f6db6a3f43c8cd1f85b7a7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/gitlab/test_gitlab_user.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3aae06f2d12a7dff8b564f8ebd1f073f0acea01378108ac4ca045d721c6830b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/source_control/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/hpe3par",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/hpe3par/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/hpe3par/test_ss_3par_cpg.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "457fe72bb1be07d527f12757607fb8baa504fe99fedd3c5f193f2a961745a67d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/pmem",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/pmem/test_pmem.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b78afde8141f157a3ddafcceea1ac1919a20f67bb8ae165bf502916a577e1ec5",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/storage/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family.test_no_changes.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ed67f65186f6ec2c5e67868d7d786e229cba4b67dc9848803e3f6bb844bfedd",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_aggi_up_twice.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_add_and_delete_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c82db6ff2a9ce205ec496728645aac7a08c6c699746cd8f981e60c8728062285",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_aggi_remove_dup.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6653eeb79ba4c679c224c9c43b4a0bde5075c9795cd5f446d95560c883df1c67",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b4dccfc3f80598ea3f2f32f6661b3b5fa6997e6d6a0e9e2f3cc4648505ec7f52",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "53abda66ee9a035f805bb82fc7cfae6e0b17f42663158bd6c7af5fa2b90aea88",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv4_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "134f64892e64e650bffb29c928ec0ab72e397c122f178417e99cb56fab5c3b2b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "acfcc70084877cfb0c89871e02d24ec9711b22085f5f5fbe4ca8a69cf0336dcf",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "08747ecd380971329c1bfe12df432f00c64dbbcf346f4c14ec799dfba42b2b1f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_ipv6_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_change_method.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_revert.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4eaf8c9708b20dab8fc90b8b2b5716167e2bc92c1c1b0638ca82e11323f78199",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/address_family_set_aggi_slaves.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13b4c7681e2369ea5765cefd22862511d38955b986f9033752faba1cd54ccd03",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp.test_no_changes.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4f339f4a90e118d5b1b3b4e3fd59a3eb7460d743f3dfb1be34a9e0656f1e117",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ed67f65186f6ec2c5e67868d7d786e229cba4b67dc9848803e3f6bb844bfedd",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_aggi_up_twice.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "732c90c6031d571c37e260b91d453408a7eb6a9b7bcef6ab5dcad7153dd653a0",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c82db6ff2a9ce205ec496728645aac7a08c6c699746cd8f981e60c8728062285",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_aggi_remove_dup.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1fda07e9a6f93949f6f53ba8b71054114024b9d1d612c4455b1ca5effe630e5e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "529e2e7b36f6ec834edb09878ead526156aa9d5349a5cedc1194796d30c7b7e4",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9db4000a5df22bf6923e3c3fae4171698ec097639c4e94297297af729fc0dbe7",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_change_method.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_revert.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70b95830220d518dae6662f2e1ca836dd9c8adc1823351048cc53db8c865c33a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc4f1ab45fe950aaa0dd6e61e3eb13423b0e1d98202a2f2b15cf78458eff5c48",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4fac1d8f53319085621e778b7012c376068ede405dd18f2a8a1a06a5f378b00a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/default_dhcp_set_aggi_slaves.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06a6a72ab203e90b10afc3dc292acd7509237b956d0867bd04722ce16fe1627f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com.test_no_changes.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_aggi_up_twice.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_add_and_delete_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_aggi_remove_dup.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ac948a5ada90b50ea34d1e31ed4657f220a7153ee2908b880f3dbcf4b1b417a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93e09e5b99049be103115e7ede6022cfd51cff8543cfc4f2552f5315e9e7ea75",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5bcf21eb70f131e027c0a1236d2264b0db9de60c2d8ac9df860b83839e7a757",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv4_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_ipv6_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb32a11d2175d165ac30d4d96265aa7890de42aad1e4c03fe862db31a9b609f6",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_change_method.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7df04865747cdaf41c044674909f7f9d789de4c721aab7638549d28106f4eb7e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "806e3459fe842c37406690b7ea1c112832ac485e8e10876495c671241ae7ab29",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_revert.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07945be2848b249d636ea429313c539ea4c9f921780e1d912b6472561821143c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac877a74278c9ed870b0358447d9c05e8dc910d4b3594bf04c63699d16d8f688",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4d65afd09be4ed2e70dadbbcc3691e8170b1e819256795dfcffb128a41a880d3",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/servers.com_set_aggi_slaves.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97d0f214b0d74d4253d3152f125a27c2f876bee8ccf6f1357bef463e7b9ce66f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup.test_no_changes",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup.test_no_changes.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_aggi_up_twice.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "691c3c19b3d8ad7ab347c24c006da07ed165f4f6161216dfb90da0f2ac922768",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f98b88779a53ef7a5c4b2dbcdf2229493bb1b9eff316d9b0fab32e2bf45ca774",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_aggi_remove_dup.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ac948a5ada90b50ea34d1e31ed4657f220a7153ee2908b880f3dbcf4b1b417a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93e09e5b99049be103115e7ede6022cfd51cff8543cfc4f2552f5315e9e7ea75",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5bcf21eb70f131e027c0a1236d2264b0db9de60c2d8ac9df860b83839e7a757",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0287f7b3a5351740d00394005ce8f49ae1a13484eaafb4c41776acf4e56c706d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96446e58502f5e9c1e50b4b3b64d639eed3afa6958e3aa5531225a24b8a94063",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_post_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b05dd67b937088e5673b7fb10978bfc40f35eb4d5f5f5682936a9c1a5053db99",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b20955f532135df51de900c83c10a6dd087cd30d0df0bfc238a5a7e057492778",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_change_method.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "806e3459fe842c37406690b7ea1c112832ac485e8e10876495c671241ae7ab29",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_revert.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "16edb798abcd4c903a6812211f3b9f3ee149161f86a0036af50ce1df0f7b224a",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac877a74278c9ed870b0358447d9c05e8dc910d4b3594bf04c63699d16d8f688",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b4f53dddf4a96187f7318bbc97ed3774b0f66b870a3e1cc0dfc2862832fa516",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/golden_output/up_down_dup_set_aggi_slaves.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3900942f0fe09bd906f70cb99b75be5c6b5f0081a0a06cb8efd66630f8b89fe",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/address_family",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/default_dhcp",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/servers.com",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/input/up_down_dup",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/fixtures/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1df465805a0f52344c2272a514db0c9d9b2187e851c0cf58b985b012aeb29289",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/interfaces_file/test_interfaces_file.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "95a94bc4c73e8018ff73d9930ebfa34d2bc441319619a63adcab35b1393cec18",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_java_keystore.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3c966aeb2a8cd93f2fe6b205edda80f08022f9d39b3e03afb7b32c67b02d90e",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_modprobe.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b3436221c926fc320aac044be960dcc9804cff9273c7c5469c8f301466400b4d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_pamd.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3b765fdcdbfdae2c24f92e5b3b35e9f1247f3f3456d1d24a075941ea7eceb95",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_parted.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9867bddb7cc565543bdfc4424b582ae22257f23d7f4b912b19c6eac25c2aa59",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_sap_task_list_execute.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e16d645d6fde15d7d5dce315d4c9ee7c9bc124d77bf1646d4c25e96d7e9015fb",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_solaris_zone.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "97894d2e554664a5b1ae9654cf55276e8d8ea302c12b1a52537c1315906f604f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_sysupgrade.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d64272139629294cb611a70cc5bc8ab03efeef5a8c20f306f0bf109ce5f57d32",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_ufw.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "df3d10c68232b533ce8a18ce408ebb8b8a5b7e5bf5bbdbe0c5d6a800ed6cbdc3",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_xfconf.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9791a52ec609c94ad8c194e00d4febb881024d2757d87b288ad61aa6682faa95",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/system/test_xfconf_info.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9bca59b7f036aef685fee67c7eb5be3661c63798ba1149ad4ffb84f1c3e3c90",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/web_infrastructure",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/web_infrastructure/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/web_infrastructure/test_apache2_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd9ebad3842220ea8251164d45b9cb1d85197ef69cd1e76f80621bf396960d8f",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/web_infrastructure/test_jenkins_build.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6b8417ebd37e3c8e8b020bee5c2fc85c2f4eddedf7035623f7de2d9c25c6879d",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/web_infrastructure/test_jenkins_plugin.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e11908d05982be48f663298dfa98903502893e1301e0b4edb970e594306998c",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/conftest.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b1465f0ed10cc15a6d7578fc527055c28729c9fa9d94856eafc4aada3b3f42a6",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/utils.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3ebeccd641cf5de2b25ae1bf4e153492bb499c641c5b5465ca25168f08b1a1ac",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/unit/requirements.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0afbc983609066c6ad6d21722430516d6f7b1c1751f6d57606195c0ca500f0f0",
- "format": 1
- },
- {
- "name": "tests/utils",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/utils/shippable",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/utils/shippable/aix.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/freebsd.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/macos.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/osx.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/rhel.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/cloud.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd953f7e779b9962e76492c389142e03174e84a8115f53e56628e2af9e66b818",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/linux-community.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4a48ce3df89f871db4f26f1a1ae16a362ff8219be874fd150037866f7e0fb64d",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/linux.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "07aa5e07a0b732a671bf9fdadfe073dd310b81857b897328ce2fa829e2c76315",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/remote.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "388d8b9bd2a30f80ca3ee9dc64b629b9ec777383c92edff0165d11d785d4f3a4",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/sanity.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6920f7ea186e75399d34231105c81a523ea5ff938d11e1f425d4fc7bf5d013b8",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/shippable.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "346be0b4487b58d1848ffa2ac6c9ce3b2fb1c5b719b80a79542f790124b975b5",
- "format": 1
- },
- {
- "name": "tests/utils/shippable/units.sh",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1375f7024d5e574f8daabab3b3f3a0aeb72b2abc6b65854e150b0479fb19a84",
- "format": 1
- },
- {
- "name": "tests/utils/constraints.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e37959909060dc6d51fbcf125a021df0889954e7cd3b2f5721a88709a1dcee78",
- "format": 1
- },
- {
- "name": "tests/.gitignore",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
- "format": 1
- },
- {
- "name": "tests/config.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "79299e4b233d86b7d878b2b35b6548347c28fd71a1166078a9958e6d8e6749c7",
- "format": 1
- },
- {
- "name": "tests/requirements.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b49e42f53135c80834a1472578c15823c22181988ebf3da36c28389c690d9f7",
- "format": 1
- },
- {
- "name": ".gitignore",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a3ff9f861480bfc0c17c530909c12e4cce972529d64f9d1abd8d8a8ac0a54c97",
- "format": 1
- },
- {
- "name": "CHANGELOG.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da7e93a8de28ead878a2eb528a5a50e6b3718b871c7dc958154ba66088e0d05f",
- "format": 1
- },
- {
- "name": "CONTRIBUTING.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1989d42706447d097ab9288cc3f5180ead69d96b4f86b74bb6eb8c1252aa947c",
- "format": 1
- },
- {
- "name": "COPYING",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
- "format": 1
- },
- {
- "name": "README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5aedf8ce17c36b016fccdd4966f57aea7028c0b0620065b79024e9eb56c49ca",
- "format": 1
- },
- {
- "name": "commit-rights.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e2b9e5fde379299fff928bd16b21f9d8bd83744d228f8dc719f7c478080ac1e",
- "format": 1
- }
- ],
- "format": 1
-}
\ No newline at end of file
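
The deleted `FILES.json` above is the collection's per-file manifest: each entry pairs a path with a `chksum_sha256` (the oft-repeated `e3b0c44298...b855` value is simply the SHA-256 of an empty file, covering the many empty `__init__.py` fixtures). As a rough sketch of how such a manifest can be re-verified, assuming the `{"files": [...], "format": 1}` layout visible in the tail above (the helper name and path handling are illustrative, not an ansible-galaxy API):

```python
import hashlib
import json
import os

# SHA-256 of zero bytes: the value repeated above for every empty __init__.py.
EMPTY_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"


def verify_files_manifest(collection_root):
    """Re-check every sha256 recorded in a collection's FILES.json.

    Illustrative sketch only; ansible-galaxy performs its own verification
    on install, and the layout is assumed from the manifest shown above.
    """
    with open(os.path.join(collection_root, "FILES.json"), "rb") as fh:
        manifest = json.load(fh)

    mismatches = []
    for entry in manifest["files"]:
        if entry["ftype"] != "file":
            continue  # directory entries carry null checksums
        digest = hashlib.sha256()
        with open(os.path.join(collection_root, entry["name"]), "rb") as fh:
            for chunk in iter(lambda: fh.read(65536), b""):
                digest.update(chunk)
        if digest.hexdigest() != entry["chksum_sha256"]:
            mismatches.append(entry["name"])
    return mismatches
```
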
diff --git a/ansible_collections/community/general/MANIFEST.json b/ansible_collections/community/general/MANIFEST.json
deleted file mode 100644
index 4742a4f8..00000000
--- a/ansible_collections/community/general/MANIFEST.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "collection_info": {
- "namespace": "community",
- "name": "general",
- "version": "4.6.1",
- "authors": [
- "Ansible (https://github.com/ansible)"
- ],
- "readme": "README.md",
- "tags": [
- "community"
- ],
- "description": null,
- "license": [],
- "license_file": "COPYING",
- "dependencies": {},
- "repository": "https://github.com/ansible-collections/community.general",
- "documentation": "https://docs.ansible.com/ansible/latest/collections/community/general/",
- "homepage": "https://github.com/ansible-collections/community.general",
- "issues": "https://github.com/ansible-collections/community.general/issues"
- },
- "file_manifest_file": {
- "name": "FILES.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cb0591f9fb0d5cf7f64a85aa53cbfc5e62b3ea8c4233de5bdfb1f1230c884ea0",
- "format": 1
- },
- "format": 1
-}
\ No newline at end of file
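
`MANIFEST.json`, removed above, anchors the chain of trust: its `file_manifest_file` entry pins the checksum of `FILES.json` itself (the `cb0591f9...` value). A minimal sketch of that single extra check, under the same assumed layout:

```python
import hashlib
import json
import os


def verify_file_manifest(collection_root):
    """Check FILES.json against the checksum pinned in MANIFEST.json.

    Sketch under the layout shown above; not a substitute for the
    verification ansible-galaxy performs itself.
    """
    with open(os.path.join(collection_root, "MANIFEST.json"), "rb") as fh:
        pinned = json.load(fh)["file_manifest_file"]

    # pinned["name"] is "FILES.json" in the manifest deleted above.
    with open(os.path.join(collection_root, pinned["name"]), "rb") as fh:
        actual = hashlib.sha256(fh.read()).hexdigest()
    return actual == pinned["chksum_sha256"]
```
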
diff --git a/ansible_collections/community/general/README.md b/ansible_collections/community/general/README.md
deleted file mode 100644
index d417466a..00000000
--- a/ansible_collections/community/general/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# Community General Collection
-
-[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-4)](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
-[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
-
-This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by the Ansible community which are not part of more specialized community collections.
-
-You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
-
-Please note that this collection does **not** support Windows targets. Only connection plugins included in this collection might support Windows targets, and will explicitly mention that in their documentation if they do so.
-
-## Code of Conduct
-
-We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.
-
-If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
-
-## Tested with Ansible
-
-Tested with the current Ansible 2.9, ansible-base 2.10, ansible-core 2.11, ansible-core 2.12 releases and the current development version of ansible-core. Ansible versions before 2.9.10 are not supported.
-
-## External requirements
-
-Some modules and plugins require external libraries. Please check the requirements for each plugin or module you use in the documentation to find out which requirements are needed.
-
-## Included content
-
-Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
-
-## Using this collection
-
-This collection is shipped with the Ansible package. So if you have it installed, no further action is required.
-
-If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
-
- ansible-galaxy collection install community.general
-
-You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
-
-```yaml
-collections:
-- name: community.general
-```
-
-Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:
-
-```bash
-ansible-galaxy collection install community.general --upgrade
-```
-
-You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
-
-```bash
-ansible-galaxy collection install community.general:==X.Y.Z
-```
-
-See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
-
-## Contributing to this collection
-
-The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.
-
-We are actively accepting new contributors.
-
-All types of contributions are very welcome.
-
-You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md)!
-
-The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
-
-You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
-
-Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-4/CONTRIBUTING.md).
-
-### Running tests
-
-See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
-
-## Collection maintenance
-
-To learn how to maintain / become a maintainer of this collection, refer to:
-
-* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-4/commit-rights.md).
-* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
-
-It is necessary for maintainers of this collection to be subscribed to:
-
-* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
-* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).
-
-They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
-
-## Communication
-
-We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.
-
-Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
-
-We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
-
-For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community).
-
-For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
-
-## Publishing New Version
-
-See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.
-
-## Release notes
-
-See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-4/CHANGELOG.rst).
-
-## Roadmap
-
-In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
-
-See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.
-
-## More information
-
-- [Ansible Collection overview](https://github.com/ansible-collections/overview)
-- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
-- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
-- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
-
-## Licensing
-
-GNU General Public License v3.0 or later.
-
-See [COPYING](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text.
diff --git a/ansible_collections/community/general/changelogs/.gitignore b/ansible_collections/community/general/changelogs/.gitignore
deleted file mode 100644
index 6be6b533..00000000
--- a/ansible_collections/community/general/changelogs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/.plugin-cache.yaml
diff --git a/ansible_collections/community/general/changelogs/changelog.yaml b/ansible_collections/community/general/changelogs/changelog.yaml
deleted file mode 100644
index 2a42f45c..00000000
--- a/ansible_collections/community/general/changelogs/changelog.yaml
+++ /dev/null
@@ -1,1605 +0,0 @@
-ancestor: 3.0.0
-releases:
- 4.0.0:
- changes:
- breaking_changes:
- - archive - adding idempotency checks for changes to file names and content
- within the ``destination`` file (https://github.com/ansible-collections/community.general/pull/3075).
- - lxd inventory plugin - when used with Python 2, the plugin now needs ``ipaddress``
- installed `from pypi <https://pypi.org/project/ipaddress/>`_ (https://github.com/ansible-collections/community.general/pull/2441).
- - scaleway_security_group_rule - when used with Python 2, the module now needs
- ``ipaddress`` installed `from pypi <https://pypi.org/project/ipaddress/>`_
- (https://github.com/ansible-collections/community.general/pull/2441).
- bugfixes:
- - _mount module utils - fixed the sanity checks (https://github.com/ansible-collections/community.general/pull/2883).
- - ali_instance_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- ansible_galaxy_install - the output value ``cmd_args`` was returning the intermediate
- command used to gather the state, instead of the command that actually performed
- the state change (https://github.com/ansible-collections/community.general/pull/3655).
- - apache2_module - fix ``a2enmod``/``a2dismod`` detection, and error message
- when not found (https://github.com/ansible-collections/community.general/issues/3253).
- - archive - fixed ``exclude_path`` values causing incorrect archive root (https://github.com/ansible-collections/community.general/pull/2816).
- - archive - fixed improper file names for single file zip archives (https://github.com/ansible-collections/community.general/issues/2818).
- - archive - fixed incorrect ``state`` result value documentation (https://github.com/ansible-collections/community.general/pull/2816).
- - archive - fixed task failure when using the ``remove`` option with a ``path``
- containing nested files for ``format``s other than ``zip`` (https://github.com/ansible-collections/community.general/issues/2919).
- - archive - fixing archive root determination when longest common root is ``/``
- (https://github.com/ansible-collections/community.general/pull/3036).
- - composer - use ``no-interaction`` option when discovering available options
- to avoid an issue where composer hangs (https://github.com/ansible-collections/community.general/pull/2348).
- - consul_acl - update the hcl allowlist to include all supported options (https://github.com/ansible-collections/community.general/pull/2495).
- - consul_kv lookup plugin - allow to set ``recurse``, ``index``, ``datacenter``
- and ``token`` as keyword arguments (https://github.com/ansible-collections/community.general/issues/2124).
- - copr - fix chroot naming issues, ``centos-stream`` changed naming to ``centos-stream-``
- (for example ``centos-stream-8``) (https://github.com/ansible-collections/community.general/issues/2084,
- https://github.com/ansible-collections/community.general/pull/3237).
- - cpanm - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
- - deploy_helper - improved parameter checking by using standard Ansible construct
- (https://github.com/ansible-collections/community.general/pull/3104).
- django_manage - argument ``command`` is now split again, as it should be
- (https://github.com/ansible-collections/community.general/issues/3215).
- django_manage - parameters ``apps`` and ``fixtures`` are now split instead
- of being used as a single argument (https://github.com/ansible-collections/community.general/issues/3333).
- - django_manage - refactor to call ``run_command()`` passing command as a list
- instead of string (https://github.com/ansible-collections/community.general/pull/3098).
- - ejabberd_user - replaced in-code check with ``required_if``, using ``get_bin_path()``
- for the command, passing args to ``run_command()`` as list instead of string
- (https://github.com/ansible-collections/community.general/pull/3093).
- - filesystem - repair ``reiserfs`` fstype support after adding it to integration
- tests (https://github.com/ansible-collections/community.general/pull/2472).
- - gitlab_deploy_key - fix idempotency on projects with multiple deploy keys
- (https://github.com/ansible-collections/community.general/pull/3473).
- - gitlab_deploy_key - fix the SSH Deploy Key being deleted accidentally while
- running task in check mode (https://github.com/ansible-collections/community.general/issues/3621,
- https://github.com/ansible-collections/community.general/pull/3622).
- - gitlab_group - avoid passing wrong value for ``require_two_factor_authentication``
- on creation when the option has not been specified (https://github.com/ansible-collections/community.general/pull/3453).
- gitlab_group_members - ``get_group_id`` returns the group ID by matching ``full_path``,
- ``path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3400).
- gitlab_group_members - fixed a pagination problem when a gitlab group has
- more than 20 members (https://github.com/ansible-collections/community.general/issues/3041).
- - gitlab_project - user projects are created using namespace ID now, instead
- of user ID (https://github.com/ansible-collections/community.general/pull/2881).
- gitlab_project_members - ``get_project_id`` returns the project ID by matching
- ``full_path`` or ``name`` (https://github.com/ansible-collections/community.general/pull/3602).
- gitlab_project_members - fixed a pagination problem when a gitlab group has
- more than 20 members (https://github.com/ansible-collections/community.general/issues/3041).
- - idrac_redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - influxdb_retention_policy - fix bug where ``INF`` duration values failed parsing
- (https://github.com/ansible-collections/community.general/pull/2385).
- - influxdb_user - allow creation of admin users when InfluxDB authentication
- is enabled but no other user exists on the database. In this scenario, InfluxDB
- 1.x allows only ``CREATE USER`` queries and rejects any other query (https://github.com/ansible-collections/community.general/issues/2364).
- - influxdb_user - fix bug where an influxdb user has no privileges for 2 or
- more databases (https://github.com/ansible-collections/community.general/pull/2499).
- - influxdb_user - fix bug which removed current privileges instead of appending
- them to existing ones (https://github.com/ansible-collections/community.general/issues/2609,
- https://github.com/ansible-collections/community.general/pull/2614).
- - ini_file - fix Unicode processing for Python 2 (https://github.com/ansible-collections/community.general/pull/2875).
- - ini_file - fix inconsistency between empty value and no value (https://github.com/ansible-collections/community.general/issues/3031).
- - interfaces_file - no longer reporting change when none happened (https://github.com/ansible-collections/community.general/pull/3328).
- - inventory and vault scripts - change file permissions to make vendored inventory
- and vault scripts executable (https://github.com/ansible-collections/community.general/pull/2337).
- - ipa_* modules - fix environment fallback for ``ipa_host`` option (https://github.com/ansible-collections/community.general/issues/3560).
- - ipa_sudorule - call ``sudorule_add_allow_command`` method instead of ``sudorule_add_allow_command_group``
- (https://github.com/ansible-collections/community.general/issues/2442).
- - iptables_state - call ``async_status`` action plugin rather than its module
- (https://github.com/ansible-collections/community.general/issues/2700).
- - iptables_state - fix a 'FutureWarning' in a regex and do some basic code clean
- up (https://github.com/ansible-collections/community.general/pull/2525).
- - iptables_state - fix a broken query of ``async_status`` result with current
- ansible-core development version (https://github.com/ansible-collections/community.general/issues/2627,
- https://github.com/ansible-collections/community.general/pull/2671).
- iptables_state - fix initialization of iptables from null state when addressing
- more than one table (https://github.com/ansible-collections/community.general/issues/2523).
- - java_cert - fix issue with incorrect alias used on PKCS#12 certificate import
- (https://github.com/ansible-collections/community.general/pull/2560).
- - java_cert - import private key as well as public certificate from PKCS#12
- (https://github.com/ansible-collections/community.general/issues/2460).
- - java_keystore - add parameter ``keystore_type`` to control output file format
- and override ``keytool``'s default, which depends on Java version (https://github.com/ansible-collections/community.general/issues/2515).
- jboss - fix the deployment file permission issue when the JBoss server is running
- under a non-root user. The deployment file is copied with file content only;
- its permissions are set to ``440`` and it belongs to the root user, so a JBoss
- ``WildFly`` server running under a non-root user is unable to read the
- deployment file (https://github.com/ansible-collections/community.general/pull/3426).
- - jenkins_build - examine presence of ``build_number`` before deleting a jenkins
- build (https://github.com/ansible-collections/community.general/pull/2850).
- - jenkins_plugin - use POST method for sending request to jenkins API when ``state``
- option is one of ``enabled``, ``disabled``, ``pinned``, ``unpinned``, or ``absent``
- (https://github.com/ansible-collections/community.general/issues/2510).
- - json_query filter plugin - avoid 'unknown type' errors for more Ansible internal
- types (https://github.com/ansible-collections/community.general/pull/2607).
- - keycloak_authentication - fix bug when two identical executions are in the
- same authentication flow (https://github.com/ansible-collections/community.general/pull/2904).
- - keycloak_authentication - fix bug, the requirement was always on ``DISABLED``
- when creating a new authentication flow (https://github.com/ansible-collections/community.general/pull/3330).
- - keycloak_client - update the check mode to not show differences resulting
- from sorting and default values relating to the properties, ``redirectUris``,
- ``attributes``, and ``protocol_mappers`` (https://github.com/ansible-collections/community.general/pull/3610).
- - keycloak_identity_provider - fix change detection when updating identity provider
- mappers (https://github.com/ansible-collections/community.general/pull/3538,
- https://github.com/ansible-collections/community.general/issues/3537).
- - keycloak_realm - ``ssl_required`` changed from a boolean type to accept the
- strings ``none``, ``external`` or ``all``. This is not a breaking change since
- the module always failed when a boolean was supplied (https://github.com/ansible-collections/community.general/pull/2693).
- - keycloak_realm - element type for ``events_listeners`` parameter should be
- ``string`` instead of ``dict`` (https://github.com/ansible-collections/community.general/pull/3231).
- - keycloak_realm - remove warning that ``reset_password_allowed`` needs to be
- marked as ``no_log`` (https://github.com/ansible-collections/community.general/pull/2694).
- - keycloak_role - quote role name when used in URL path to avoid errors when
- role names contain special characters (https://github.com/ansible-collections/community.general/issues/3535,
- https://github.com/ansible-collections/community.general/pull/3536).
- - launchd - fixed sanity check in the module's code (https://github.com/ansible-collections/community.general/pull/2960).
- - launchd - use private attribute to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - linode inventory plugin - fix default value of new option ``ip_style`` (https://github.com/ansible-collections/community.general/issues/3337).
- - linode_v4 - changed the error message to point to the correct bugtracker URL
- (https://github.com/ansible-collections/community.general/pull/2430).
- - logdns callback plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - logstash callback plugin - replace ``_option`` with ``context.CLIARGS`` to
- fix the plugin on ansible-base and ansible-core (https://github.com/ansible-collections/community.general/issues/2692).
- - lvol - fixed rounding errors (https://github.com/ansible-collections/community.general/issues/2370).
- - lvol - fixed size unit capitalization to match units used between different
- tools for comparison (https://github.com/ansible-collections/community.general/issues/2360).
- - lvol - honor ``check_mode`` on thinpool (https://github.com/ansible-collections/community.general/issues/2934).
- - macports - add ``stdout`` and ``stderr`` to return values (https://github.com/ansible-collections/community.general/issues/3499).
- - maven_artifact - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - memcached cache plugin - change function argument names to fix sanity errors
- (https://github.com/ansible-collections/community.general/pull/3194).
- - memset_memstore_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - memset_server_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - modprobe - added additional checks to ensure module load/unload is effective
- (https://github.com/ansible-collections/community.general/issues/1608).
- - module_helper module utils - ``CmdMixin`` must also use ``LC_ALL`` to enforce
- locale choice (https://github.com/ansible-collections/community.general/pull/2731).
- - module_helper module utils - avoid failing when non-zero ``rc`` is present
- on regular exit (https://github.com/ansible-collections/community.general/pull/2912).
- - module_helper module utils - fixed change-tracking for dictionaries and lists
- (https://github.com/ansible-collections/community.general/pull/2951).
- - netapp module utils - remove always-true conditional to fix sanity errors
- (https://github.com/ansible-collections/community.general/pull/3194).
- - netcup_dns - use ``str(ex)`` instead of unreliable ``ex.message`` in exception
- handling to fix ``AttributeError`` in error cases (https://github.com/ansible-collections/community.general/pull/2590).
- - nmap inventory plugin - fix local variable error when cache is disabled (https://github.com/ansible-collections/community.general/issues/2512).
- - nmcli - added ip4/ip6 configuration arguments for ``sit`` and ``ipip`` tunnels
- (https://github.com/ansible-collections/community.general/issues/3238, https://github.com/ansible-collections/community.general/pull/3239).
- - nmcli - compare MAC addresses case insensitively to fix idempotency issue
- (https://github.com/ansible-collections/community.general/issues/2409).
- - nmcli - fixed ``dns6`` option handling so that it is treated as a list internally
- (https://github.com/ansible-collections/community.general/pull/3563).
- - nmcli - fixed ``ipv4.route-metric`` being in properties of type list (https://github.com/ansible-collections/community.general/pull/3563).
- - nmcli - fixes team-slave configuration by adding connection.slave-type (https://github.com/ansible-collections/community.general/issues/766).
- - nmcli - if type is ``bridge-slave`` add ``slave-type bridge`` to ``nmcli``
- command (https://github.com/ansible-collections/community.general/issues/2408).
- - npm - correctly handle cases where a dependency does not have a ``version``
- property because it is either missing or invalid (https://github.com/ansible-collections/community.general/issues/2917).
- - npm - when the ``version`` option is used the comparison of installed vs missing
- will use name@version instead of just name, allowing version specific updates
- (https://github.com/ansible-collections/community.general/issues/2021).
- - one_image - fix error message when renaming an image (https://github.com/ansible-collections/community.general/pull/3626).
- - one_template - change function argument name to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - one_vm - Allow missing NIC keys (https://github.com/ansible-collections/community.general/pull/2435).
- - oneview_datacenter_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_enclosure_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_ethernet_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_fc_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_fcoe_network_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_logical_interconnect_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_network_set_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - oneview_san_manager_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - online inventory plugin - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - online module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - open_iscsi - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3286).
- - openbsd_pkg - fix crash from ``KeyError`` exception when package installs,
- but ``pkg_add`` returns with a non-zero exit code (https://github.com/ansible-collections/community.general/pull/3336).
- - openbsd_pkg - fix regexp matching crash. This bug could trigger on package
- names with special characters, for example ``g++`` (https://github.com/ansible-collections/community.general/pull/3161).
- - opentelemetry callback plugin - validated the task result exception without
- crashing. Also simplifying code a bit (https://github.com/ansible-collections/community.general/pull/3450,
- https://github.com/ansible/ansible/issues/75726).
- - openwrt_init - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3284).
- ovirt4 inventory script - improve configparser creation to avoid crashes for
- options without values (https://github.com/ansible-collections/community.general/issues/674).
- - packet_device - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - packet_sshkey - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - pacman - fix changed status when ignorepkg has been defined (https://github.com/ansible-collections/community.general/issues/1758).
- pamd - the code for ``state=updated``, when dealing with the pam module arguments,
- made no distinction between ``None`` and an empty list (https://github.com/ansible-collections/community.general/issues/3260).
- - pamd - fixed problem with files containing only one or two lines (https://github.com/ansible-collections/community.general/issues/2925).
- - pids - avoid crashes for older ``psutil`` versions, like on RHEL6 and RHEL7
- (https://github.com/ansible-collections/community.general/pull/2808).
- - pipx - ``state=inject`` was failing to parse the list of injected packages
- (https://github.com/ansible-collections/community.general/pull/3611).
- - pipx - set environment variable ``USE_EMOJI=0`` to prevent errors in platforms
- that do not support ``UTF-8`` (https://github.com/ansible-collections/community.general/pull/3611).
- pipx - the output value ``cmd_args`` was returning the intermediate command
- used to gather the state, instead of the command that actually performed the
- state change (https://github.com/ansible-collections/community.general/pull/3655).
- - pkgin - Fix exception encountered when all packages are already installed
- (https://github.com/ansible-collections/community.general/pull/3583).
- - pkgng - ``name=* state=latest`` check for upgrades did not count "Number of
- packages to be reinstalled" as a `changed` action, giving incorrect results
- in both regular and check mode (https://github.com/ansible-collections/community.general/pull/3526).
- - pkgng - an `earlier PR `_
- broke check mode so that the module always reported `not changed`. This is
- now fixed so that the module reports the number of upgrade or install actions
- that would be performed (https://github.com/ansible-collections/community.general/pull/3526).
- - pkgng - the ``annotation`` functionality was broken and is now fixed, and
- now also works with check mode (https://github.com/ansible-collections/community.general/pull/3526).
- - proxmox inventory plugin - fixed parsing failures when some cluster nodes
- are offline (https://github.com/ansible-collections/community.general/issues/2931).
- - proxmox inventory plugin - fixed plugin failure when a ``qemu`` guest has
- no ``template`` key (https://github.com/ansible-collections/community.general/pull/3052).
- - proxmox_group_info - fix module crash if a ``group`` parameter is used (https://github.com/ansible-collections/community.general/pull/3649).
- - proxmox_kvm - clone operation should return the VMID of the target VM and
- not that of the source VM. This was failing when the target VM with the chosen
- name already existed (https://github.com/ansible-collections/community.general/pull/3266).
- - proxmox_kvm - fix parsing of Proxmox VM information with device info not containing
- a comma, like disks backed by ZFS zvols (https://github.com/ansible-collections/community.general/issues/2840).
- - proxmox_kvm - fix result of clone, now returns ``newid`` instead of ``vmid``
- (https://github.com/ansible-collections/community.general/pull/3034).
- - proxmox_kvm - fixed ``vmid`` return value when VM with ``name`` already exists
- (https://github.com/ansible-collections/community.general/issues/2648).
- puppet - replace ``console`` with ``stdout`` in ``logdest`` option when ``all``
- has been chosen (https://github.com/ansible-collections/community.general/issues/1190).
- - rax_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - redfish_command - fix extraneous error caused by missing ``bootdevice`` argument
- when using the ``DisableBootOverride`` sub-command (https://github.com/ansible-collections/community.general/issues/3005).
- - redfish_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - redfish_utils module utils - do not attempt to change the boot source override
- mode if not specified by the user (https://github.com/ansible-collections/community.general/issues/3509/).
- - redfish_utils module utils - if a manager network property is not specified
- in the service, attempt to change the requested settings (https://github.com/ansible-collections/community.general/issues/3404/).
- - redfish_utils module utils - if given, add account ID of user that should
- be created to HTTP request (https://github.com/ansible-collections/community.general/pull/3343/).
- - redis cache - improved connection string parsing (https://github.com/ansible-collections/community.general/issues/497).
- rhsm_release - fix the issue that the module considers ``8``, ``7Client`` and ``7Workstation``
- as invalid releases (https://github.com/ansible-collections/community.general/pull/2571).
- - saltstack connection plugin - fix function signature (https://github.com/ansible-collections/community.general/pull/3194).
- - scaleway module utils - improve split call to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - scaleway plugin inventory - fix ``JSON object must be str, not 'bytes'`` with
- Python 3.5 (https://github.com/ansible-collections/community.general/issues/2769).
- - smartos_image_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - snap - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/pull/2731).
- - snap - fix formatting of ``--channel`` argument when the ``channel`` option
- is used (https://github.com/ansible-collections/community.general/pull/3028).
- - snap - fix various bugs which prevented the module from working at all, and
- which resulted in ``state=absent`` failing on absent snaps (https://github.com/ansible-collections/community.general/issues/2835,
- https://github.com/ansible-collections/community.general/issues/2906, https://github.com/ansible-collections/community.general/pull/2912).
- - snap - fixed the order of the ``--classic`` parameter in the command line
- invocation (https://github.com/ansible-collections/community.general/issues/2916).
- snap_alias - the output value ``cmd_args`` was returning the intermediate command
- used to gather the state, instead of the command that actually performed the
- state change (https://github.com/ansible-collections/community.general/pull/3655).
- - snmp_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - ssh_config - reduce stormssh searches based on host (https://github.com/ansible-collections/community.general/pull/2568/).
- - stacki_host - when adding a new server, ``rack`` and ``rank`` must be passed,
- and network parameters are optional (https://github.com/ansible-collections/community.general/pull/2681).
- - stackpath_compute inventory script - fix broken validation checks for client
- ID and client secret (https://github.com/ansible-collections/community.general/pull/2448).
- - supervisorctl - state ``signalled`` was not working (https://github.com/ansible-collections/community.general/pull/3068).
- - svr4pkg - convert string to a bytes-like object to avoid ``TypeError`` with
- Python 3 (https://github.com/ansible-collections/community.general/issues/2373).
- - taiga - some constructs in the module fixed to work also in Python 3 (https://github.com/ansible-collections/community.general/pull/3067).
- - terraform - ensure the workspace is set back to its previous value when the
- apply fails (https://github.com/ansible-collections/community.general/pull/2634).
- - tss lookup plugin - fixed backwards compatibility issue with ``python-tss-sdk``
- version <=0.0.5 (https://github.com/ansible-collections/community.general/issues/3192,
- https://github.com/ansible-collections/community.general/pull/3199).
- - tss lookup plugin - fixed incompatibility with ``python-tss-sdk`` version
- 1.0.0 (https://github.com/ansible-collections/community.general/issues/3057,
- https://github.com/ansible-collections/community.general/pull/3139).
- udm_dns_record - fixed management of PTR records, which could never have worked
- before (https://github.com/ansible-collections/community.general/pull/3256).
- - ufw - use generator to fix sanity errors (https://github.com/ansible-collections/community.general/pull/3194).
- - utm_aaa_group_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - utm_ca_host_key_cert_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - utm_network_interface_address_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - utm_proxy_frontend_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - utm_proxy_location_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - vdo - boolean arguments now compared with proper ``true`` and ``false`` values
- instead of string representations like ``"yes"`` or ``"no"`` (https://github.com/ansible-collections/community.general/pull/3191).
- - xenserver_facts - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - xfconf - also use ``LC_ALL`` to enforce locale choice (https://github.com/ansible-collections/community.general/issues/2715).
- - xfconf_info - added support to check mode (https://github.com/ansible-collections/community.general/pull/3084).
- - yaml callback plugin - avoid modifying PyYAML so that other plugins using
- it on the controller, like the ``to_yaml`` filter, do not produce different
- output (https://github.com/ansible-collections/community.general/issues/3471,
- https://github.com/ansible-collections/community.general/pull/3478).
- - yum_versionlock - fix idempotency when using wildcard (asterisk) in ``name``
- option (https://github.com/ansible-collections/community.general/issues/2761).
- - zfs - certain ZFS properties, especially sizes, would lead to a task being
- falsely marked as "changed" even when no actual change was made (https://github.com/ansible-collections/community.general/issues/975,
- https://github.com/ansible-collections/community.general/pull/2454).
- - zfs - treated received properties as local (https://github.com/ansible-collections/community.general/pull/502).
- - zypper_repository - fix idempotency on adding repository with ``$releasever``
- and ``$basearch`` variables (https://github.com/ansible-collections/community.general/issues/1985).
- zypper_repository - when a URL to a ``.repo`` file was provided in the ``repo``
- option with ``state=present``, only the first run succeeded; subsequent runs
- failed due to missing checks prior to starting zypper. Using ``state=absent``
- in combination with a ``.repo`` file was not working either; a commented usage
- sketch follows this entry (https://github.com/ansible-collections/community.general/issues/1791,
- https://github.com/ansible-collections/community.general/issues/3466).
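- # Editor's sketch (not part of the original changelog): minimal usage of the
- # zypper_repository fix above. The URL is hypothetical; with the fix, repeated
- # runs are idempotent, and state=absent also works with a .repo file.
- #
- #   - name: Add a repository from a .repo file URL
- #     community.general.zypper_repository:
- #       repo: https://example.com/repos/example.repo
- #       state: present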
- deprecated_features:
- - ali_instance_info - marked removal version of deprecated parameters ``availability_zone``
- and ``instance_names`` (https://github.com/ansible-collections/community.general/issues/2429).
- - bitbucket_* modules - ``username`` options have been deprecated in favor of
- ``workspace`` and will be removed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/2045).
- - dnsimple - python-dnsimple < 2.0.0 is deprecated and support for it will be
- removed in community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2946#discussion_r667624693).
- - gitlab_group_members - setting ``gitlab_group`` to ``name`` or ``path`` is
- deprecated. Use ``full_path`` instead (https://github.com/ansible-collections/community.general/pull/3451).
- - keycloak_authentication - the return value ``flow`` is now deprecated and
- will be removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
- - keycloak_group - the return value ``group`` is now deprecated and will be
- removed in community.general 6.0.0; use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/3280).
- - linode - parameter ``backupsenabled`` is deprecated and will be removed in
- community.general 5.0.0 (https://github.com/ansible-collections/community.general/pull/2410).
- lxd_container - the current default value ``true`` of ``ignore_volatile_options``
- is deprecated and will change to ``false`` in community.general 6.0.0; a pinning
- sketch follows this list (https://github.com/ansible-collections/community.general/pull/3429).
- - serverless - deprecating parameter ``functions`` because it was not used in
- the code (https://github.com/ansible-collections/community.general/pull/2845).
- - xfconf - deprecate the ``get`` state. The new module ``xfconf_info`` should
- be used instead (https://github.com/ansible-collections/community.general/pull/3049).
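- # Editor's sketch (not part of the original changelog): pinning the deprecated
- # lxd_container default noted above, so the planned flip to ``false`` in
- # community.general 6.0.0 does not change playbook behavior. The container
- # name is hypothetical.
- #
- #   - name: Manage a container with the volatile-option handling pinned
- #     community.general.lxd_container:
- #       name: mycontainer
- #       state: started
- #       ignore_volatile_options: true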
- major_changes:
- 'bitbucket_* modules - ``client_id`` is no longer marked as ``no_log=true``.
- If you relied on its value not showing up in logs and output, please mark
- the whole task with ``no_log: true`` as shown in the commented sketch below (https://github.com/ansible-collections/community.general/pull/2045).'
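- # Editor's sketch (not part of the original changelog): keeping ``client_id``
- # out of logs per the note above by marking the whole task with ``no_log``.
- # Module choice and values are illustrative only.
- #
- #   - name: A bitbucket_* task whose output must stay out of logs
- #     community.general.bitbucket_pipeline_variable:
- #       repository: myrepo
- #       workspace: myworkspace
- #       name: MY_SECRET
- #       value: "{{ secret_value }}"
- #       secured: true
- #       client_id: "{{ bb_client_id }}"
- #       client_secret: "{{ bb_client_secret }}"
- #       state: present
- #     no_log: true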
- minor_changes:
- - Avoid internal ansible-core module_utils in favor of equivalent public API
- available since at least Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/2877).
- - ModuleHelper module utils - improved mechanism for customizing the calculation
- of ``changed`` (https://github.com/ansible-collections/community.general/pull/2514).
- - Remove unnecessary ``__init__.py`` files from ``plugins/`` (https://github.com/ansible-collections/community.general/pull/2632).
- - apache2_module - minor refactoring improving code quality, readability and
- speed (https://github.com/ansible-collections/community.general/pull/3106).
- - archive - added ``dest_state`` return value to describe final state of ``dest``
- after successful task execution (https://github.com/ansible-collections/community.general/pull/2913).
- - archive - added ``exclusion_patterns`` option to exclude files or subdirectories
- from archives (https://github.com/ansible-collections/community.general/pull/2616).
- - archive - refactoring prior to fix for idempotency checks. The fix will be
- a breaking change and only appear in community.general 4.0.0 (https://github.com/ansible-collections/community.general/pull/2987).
- - bitbucket_* modules - add ``user`` and ``password`` options for Basic authentication
- (https://github.com/ansible-collections/community.general/pull/2045).
- - chroot connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - cloud_init_data_facts - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - cmd (Module Helper) module utils - ``CmdMixin`` now pulls the value for ``run_command()``
- params from ``self.vars``, as opposed to previously retrieving those from
- ``self.module.params`` (https://github.com/ansible-collections/community.general/pull/2517).
- - composer - add ``composer_executable`` option (https://github.com/ansible-collections/community.general/issues/2649).
- - datadog_event - adding parameter ``api_host`` to allow selecting a datadog
- API endpoint instead of using the default one (https://github.com/ansible-collections/community.general/issues/2774,
- https://github.com/ansible-collections/community.general/pull/2775).
- - datadog_monitor - allow creation of composite datadog monitors (https://github.com/ansible-collections/community.general/issues/2956).
- - dig lookup plugin - add ``retry_servfail`` option (https://github.com/ansible-collections/community.general/pull/3247).
- - dnsimple - module rewrite to include support for python-dnsimple>=2.0.0; also
- add ``sandbox`` parameter (https://github.com/ansible-collections/community.general/pull/2946).
- - elastic callback plugin - enriched the stacktrace information with the ``message``,
- ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3556).
- - filesystem - cleanup and revamp module, tests and doc. Pass all commands to
- ``module.run_command()`` as lists. Move the device-vs-mountpoint logic to
- ``grow()`` method. Give to all ``get_fs_size()`` the same logic and error
- handling. (https://github.com/ansible-collections/community.general/pull/2472).
- - filesystem - extend support for FreeBSD. Avoid potential data loss by checking
- existence of a filesystem with ``fstyp`` (native command) if ``blkid`` (foreign
- command) doesn't find one. Add support for character devices and ``ufs`` filesystem
- type (https://github.com/ansible-collections/community.general/pull/2902).
- - flatpak - add ``no_dependencies`` parameter (https://github.com/ansible/ansible/pull/55452,
- https://github.com/ansible-collections/community.general/pull/2751).
- - flatpak - allows installing or uninstalling a list of packages (https://github.com/ansible-collections/community.general/pull/2521).
- - funcd connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - gem - add ``bindir`` option to specify an installation path for executables
- such as ``/home/user/bin`` or ``/home/user/.local/bin`` (https://github.com/ansible-collections/community.general/pull/2837).
- - gem - add ``norc`` option to avoid loading any ``.gemrc`` file (https://github.com/ansible-collections/community.general/pull/2837).
- - github_repo - add new option ``api_url`` to allow working with on premises
- installations (https://github.com/ansible-collections/community.general/pull/3038).
- - gitlab_group - add new options ``project_creation_level``, ``auto_devops_enabled``,
- ``subgroup_creation_level`` (https://github.com/ansible-collections/community.general/pull/3248).
- - gitlab_group - add new property ``require_two_factor_authentication`` (https://github.com/ansible-collections/community.general/pull/3367).
- - gitlab_group_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3047).
- - gitlab_group_members - added functionality to set all members exactly as given
- (https://github.com/ansible-collections/community.general/pull/3047).
- - gitlab_project - add new options ``allow_merge_on_skipped_pipeline``, ``only_allow_merge_if_all_discussions_are_resolved``,
- ``only_allow_merge_if_pipeline_succeeds``, ``packages_enabled``, ``remove_source_branch_after_merge``,
- ``squash_option`` (https://github.com/ansible-collections/community.general/pull/3002).
- - gitlab_project - add new properties ``ci_config_path`` and ``shared_runners_enabled``
- (https://github.com/ansible-collections/community.general/pull/3379).
- - gitlab_project - projects can be created under other user's namespaces with
- the new ``username`` option (https://github.com/ansible-collections/community.general/pull/2824).
- - gitlab_project_members - ``gitlab_user`` can now also be a list of users (https://github.com/ansible-collections/community.general/pull/3319).
- - gitlab_project_members - added functionality to set all members exactly as
- given (https://github.com/ansible-collections/community.general/pull/3319).
- - gitlab_runner - support project-scoped gitlab.com runners registration (https://github.com/ansible-collections/community.general/pull/634).
- - gitlab_user - add ``expires_at`` option (https://github.com/ansible-collections/community.general/issues/2325).
- - gitlab_user - add functionality for adding external identity providers to
- a GitLab user (https://github.com/ansible-collections/community.general/pull/2691).
- - gitlab_user - allow to reset an existing password with the new ``reset_password``
- option (https://github.com/ansible-collections/community.general/pull/2691).
- - gitlab_user - specifying a password is no longer necessary (https://github.com/ansible-collections/community.general/pull/2691).
- - gunicorn - search for ``gunicorn`` binary in more paths (https://github.com/ansible-collections/community.general/pull/3092).
- hana_query - added the ability to use hdbuserstore (https://github.com/ansible-collections/community.general/pull/3125).
- - hpilo_info - added ``host_power_status`` return value to report power state
- of machine with ``OFF``, ``ON`` or ``UNKNOWN`` (https://github.com/ansible-collections/community.general/pull/3079).
- - idrac_redfish_config - modified set_manager_attributes function to skip invalid
- attribute instead of returning. Added skipped attributes to output. Modified
- module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995).
- - influxdb_retention_policy - add ``state`` parameter with allowed values ``present``
- and ``absent`` to support deletion of existing retention policies (https://github.com/ansible-collections/community.general/issues/2383).
- - influxdb_retention_policy - simplify duration logic parsing (https://github.com/ansible-collections/community.general/pull/2385).
- ini_file - add the ability to define multiple options with the same name but
- different values (https://github.com/ansible-collections/community.general/issues/273,
- https://github.com/ansible-collections/community.general/issues/1204).
- ini_file - add module option ``exclusive`` (boolean) for the ability to add/remove
- single ``option=value`` entries without overwriting existing options with
- the same name but different values; a commented sketch follows this list (https://github.com/ansible-collections/community.general/pull/3033).
- - ini_file - opening file with encoding ``utf-8-sig`` (https://github.com/ansible-collections/community.general/issues/2189).
- - interfaces_file - minor refactor (https://github.com/ansible-collections/community.general/pull/3328).
- - iocage connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - ipa_config - add ``ipaselinuxusermaporder`` option to set the SELinux user
- map order (https://github.com/ansible-collections/community.general/pull/3178).
- - ipa_group - add ``append`` option for adding group and users members, instead
- of replacing the respective lists (https://github.com/ansible-collections/community.general/pull/3545).
- - jail connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - java_keystore - added ``ssl_backend`` parameter for using the cryptography
- library instead of the OpenSSL binary (https://github.com/ansible-collections/community.general/pull/2485).
- - java_keystore - replace envvar by stdin to pass secret to ``keytool`` (https://github.com/ansible-collections/community.general/pull/2526).
- - jenkins_build - support stopping a running jenkins build (https://github.com/ansible-collections/community.general/pull/2850).
- - jenkins_job_info - the ``password`` and ``token`` parameters can also be omitted
- to retrieve only public information (https://github.com/ansible-collections/community.general/pull/2948).
- - jenkins_plugin - add fallback url(s) for failure of plugin installation/download
- (https://github.com/ansible-collections/community.general/pull/1334).
- - jira - add comment visibility parameter for comment operation (https://github.com/ansible-collections/community.general/pull/2556).
- - kernel_blacklist - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3329).
- - keycloak_* modules - refactor many of the ``keycloak_*`` modules to have similar
- structures, comments, and documentation (https://github.com/ansible-collections/community.general/pull/3280).
- - keycloak_authentication - enhanced diff mode to also return before and after
- state when the authentication flow is updated (https://github.com/ansible-collections/community.general/pull/2963).
- - keycloak_client - add ``authentication_flow_binding_overrides`` option (https://github.com/ansible-collections/community.general/pull/2949).
- - keycloak_realm - add ``events_enabled`` parameter to allow activation or deactivation
- of login events (https://github.com/ansible-collections/community.general/pull/3231).
- - linode - added proper traceback when failing due to exceptions (https://github.com/ansible-collections/community.general/pull/2410).
- - linode - parameter ``additional_disks`` is now validated as a list of dictionaries
- (https://github.com/ansible-collections/community.general/pull/2410).
- - linode inventory plugin - adds the ``ip_style`` configuration key. Set to
- ``api`` to get more detailed network information back from the remote Linode host
- (https://github.com/ansible-collections/community.general/pull/3203).
- - lxc connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- lxd_container - add ``ignore_volatile_options`` option, which allows disabling
- the behavior where the module ignores options starting with ``volatile.`` (https://github.com/ansible-collections/community.general/pull/3331).
- - mail - added the ``ehlohost`` parameter which allows for manual override of
- the host used in SMTP EHLO (https://github.com/ansible-collections/community.general/pull/3425).
- - maven_artifact - added ``checksum_alg`` option to support SHA1 checksums in
- order to support FIPS systems (https://github.com/ansible-collections/community.general/pull/2662).
- - module_helper cmd module utils - added the ``ArgFormat`` style ``BOOLEAN_NOT``,
- to add CLI parameters when the module argument is false-ish (https://github.com/ansible-collections/community.general/pull/3290).
- - module_helper module utils - added feature flag parameter to ``CmdMixin``
- to control whether ``cmd_args`` is automatically added to the module output
- (https://github.com/ansible-collections/community.general/pull/3648).
- - module_helper module utils - added feature flag parameters to ``CmdMixin``
- to control whether ``rc``, ``out`` and ``err`` are automatically added to
- the module output (https://github.com/ansible-collections/community.general/pull/2922).
- - module_helper module utils - break down of the long file into smaller pieces
- (https://github.com/ansible-collections/community.general/pull/2393).
- - module_helper module utils - method ``CmdMixin.run_command()`` now accepts
- ``process_output`` specifying a function to process the outcome of the underlying
- ``module.run_command()`` (https://github.com/ansible-collections/community.general/pull/2564).
- - module_helper module_utils - added classmethod to trigger the execution of
- MH modules (https://github.com/ansible-collections/community.general/pull/3206).
- - nmcli - add ``disabled`` value to ``method6`` option (https://github.com/ansible-collections/community.general/issues/2730).
- - nmcli - add ``dummy`` interface support (https://github.com/ansible-collections/community.general/issues/724).
- - nmcli - add ``gre`` tunnel support (https://github.com/ansible-collections/community.general/issues/3105,
- https://github.com/ansible-collections/community.general/pull/3262).
- - nmcli - add ``gsm`` support (https://github.com/ansible-collections/community.general/pull/3313).
- - nmcli - add ``routing_rules4`` and ``may_fail4`` options (https://github.com/ansible-collections/community.general/issues/2730).
- - nmcli - add ``runner`` and ``runner_hwaddr_policy`` options (https://github.com/ansible-collections/community.general/issues/2901).
- - nmcli - add ``wifi-sec`` option change detection to support managing secure
- Wi-Fi connections (https://github.com/ansible-collections/community.general/pull/3136).
- - nmcli - add ``wifi`` option to support managing Wi-Fi settings such as ``hidden``
- or ``mode`` (https://github.com/ansible-collections/community.general/pull/3081).
- - nmcli - add new options to ignore automatic DNS servers and gateways (https://github.com/ansible-collections/community.general/issues/1087).
- - nmcli - query ``nmcli`` directly to determine available WiFi options (https://github.com/ansible-collections/community.general/pull/3141).
- - nmcli - remove dead code, ``options`` never contains keys from ``param_alias``
- (https://github.com/ansible-collections/community.general/pull/2417).
- - nmcli - the option ``routing_rules4`` can now be specified as a list of strings,
- instead of as a single string (https://github.com/ansible-collections/community.general/issues/3401).
- - nrdp callback plugin - parameters are now converted to strings, except ``validate_certs``
- which is converted to boolean (https://github.com/ansible-collections/community.general/pull/2878).
- - onepassword lookup plugin - add ``domain`` option (https://github.com/ansible-collections/community.general/issues/2734).
- open_iscsi - adding support for mutual authentication between target and initiator
- (https://github.com/ansible-collections/community.general/pull/3422).
- - open_iscsi - add ``auto_portal_startup`` parameter to allow ``node.startup``
- setting per portal (https://github.com/ansible-collections/community.general/issues/2685).
- - open_iscsi - also consider ``portal`` and ``port`` to check if already logged
- in or not (https://github.com/ansible-collections/community.general/issues/2683).
- - open_iscsi - minor refactoring (https://github.com/ansible-collections/community.general/pull/3286).
- - opentelemetry callback plugin - added option ``enable_from_environment`` to
- support enabling the plugin only if the given environment variable exists
- and it is set to true (https://github.com/ansible-collections/community.general/pull/3498).
- - opentelemetry callback plugin - enriched the span attributes with HTTP metadata
- for those Ansible tasks that interact with third party systems (https://github.com/ansible-collections/community.general/pull/3448).
- - opentelemetry callback plugin - enriched the stacktrace information for loops
- with the ``message``, ``exception`` and ``stderr`` fields from the failed
- item in the tasks in addition to the name of the task and failed item (https://github.com/ansible-collections/community.general/pull/3599).
- - opentelemetry callback plugin - enriched the stacktrace information with the
- ``message``, ``exception`` and ``stderr`` fields from the failed task (https://github.com/ansible-collections/community.general/pull/3496).
- opentelemetry callback plugin - transformed args into a list of span attributes;
- in addition, it redacted username and password from any URLs (https://github.com/ansible-collections/community.general/pull/3564).
- - openwrt_init - minor refactoring (https://github.com/ansible-collections/community.general/pull/3284).
- - opkg - allow ``name`` to be a YAML list of strings (https://github.com/ansible-collections/community.general/issues/572,
- https://github.com/ansible-collections/community.general/pull/3554).
- - pacman - add ``executable`` option to use an alternative pacman binary (https://github.com/ansible-collections/community.general/issues/2524).
- - pacman - speed up checking if the package is installed, when the latest version
- check is not needed (https://github.com/ansible-collections/community.general/pull/3606).
- - pamd - minor refactorings (https://github.com/ansible-collections/community.general/pull/3285).
- - passwordstore lookup - add option ``missing`` to choose what to do if the
- password file is missing (https://github.com/ansible-collections/community.general/pull/2500).
- - pids - refactor to add support for older ``psutil`` versions to the ``pattern``
- option (https://github.com/ansible-collections/community.general/pull/3315).
- - pipx - minor refactor on the ``changed`` logic (https://github.com/ansible-collections/community.general/pull/3647).
- pkgin - in case of ``pkgin`` tool failure, display the returned standard output
- ``stdout`` and standard error ``stderr`` to ease debugging (https://github.com/ansible-collections/community.general/issues/3146).
- - pkgng - ``annotation`` can now also be a YAML list (https://github.com/ansible-collections/community.general/pull/3526).
- - pkgng - packages being installed (or upgraded) are acted on in one command
- (per action) (https://github.com/ansible-collections/community.general/issues/2265).
- - pkgng - status message specifies number of packages installed and/or upgraded
- separately. Previously, all changes were reported as one count of packages
- "added" (https://github.com/ansible-collections/community.general/pull/3393).
- - proxmox inventory plugin - added snapshots to host facts (https://github.com/ansible-collections/community.general/pull/3044).
- - proxmox_group_info - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - proxmox_kvm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - qubes connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - rax_mon_notification_plan - fixed validation checks by specifying type ``str``
- as the ``elements`` of parameters ``ok_state``, ``warning_state`` and ``critical_state``
- (https://github.com/ansible-collections/community.general/pull/2955).
- - redfish_command - add ``boot_override_mode`` argument to BootSourceOverride
- commands (https://github.com/ansible-collections/community.general/issues/3134).
- - redfish_command and redfish_config and redfish_utils module utils - add parameter
- to strip the etag of quotes before patch, since some vendors do not properly handle
- ``If-Match`` etags with quotes (https://github.com/ansible-collections/community.general/pull/3296).
- - redfish_config - modified module exit to add warning variable (https://github.com/ansible-collections/community.general/issues/1995).
- - redfish_info - include ``Status`` property for Thermal objects when querying
- Thermal properties via ``GetChassisThermals`` command (https://github.com/ansible-collections/community.general/issues/3232).
- - redfish_utils module utils - modified set_bios_attributes function to skip
- invalid attribute instead of returning. Added skipped attributes to output
- (https://github.com/ansible-collections/community.general/issues/1995).
- - redhat_subscription - add ``server_prefix`` and ``server_port`` parameters
- (https://github.com/ansible-collections/community.general/pull/2779).
- redis - allow using the term ``replica`` instead of ``slave``, which has
- been the official Redis terminology since 2018 (https://github.com/ansible-collections/community.general/pull/2867).
- - rhevm - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - saltstack connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - scaleway plugin inventory - parse scw-cli config file for ``oauth_token``
- (https://github.com/ansible-collections/community.general/pull/3250).
- - serverless - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - slack - minor refactoring (https://github.com/ansible-collections/community.general/pull/3205).
- - snap - added ``enabled`` and ``disabled`` states (https://github.com/ansible-collections/community.general/issues/1990).
- - snap - improved module error handling, especially for the case when snap server
- is down (https://github.com/ansible-collections/community.general/issues/2970).
- - splunk callback plugin - add ``batch`` option for user-configurable correlation
- IDs (https://github.com/ansible-collections/community.general/issues/2790).
- - spotinst_aws_elastigroup - elements of list parameters are now validated (https://github.com/ansible-collections/community.general/pull/2355).
- - ssh_config - new feature to set ``ForwardAgent`` option to ``yes`` or ``no``
- (https://github.com/ansible-collections/community.general/issues/2473).
- - stacki_host - minor refactoring (https://github.com/ansible-collections/community.general/pull/2681).
- - supervisorctl - add the possibility to restart all programs and program groups
- (https://github.com/ansible-collections/community.general/issues/3551).
- - supervisorctl - using standard Ansible mechanism to validate ``signalled``
- state required parameter (https://github.com/ansible-collections/community.general/pull/3068).
- terraform - add optional ``check_destroy`` parameter to check for deletion
- of resources before the plan is applied (https://github.com/ansible-collections/community.general/pull/2874).
- - terraform - add ``parallelism`` parameter (https://github.com/ansible-collections/community.general/pull/3540).
- - terraform - add option ``overwrite_init`` to skip init if exists (https://github.com/ansible-collections/community.general/pull/2573).
- - terraform - minor refactor (https://github.com/ansible-collections/community.general/pull/2557).
- - timezone - print error message to debug instead of warning when timedatectl
- fails (https://github.com/ansible-collections/community.general/issues/1942).
- - tss lookup plugin - added ``token`` parameter for token authorization; ``username``
- and ``password`` are optional when ``token`` is provided (https://github.com/ansible-collections/community.general/pull/3327).
- - tss lookup plugin - added new parameter for domain authorization (https://github.com/ansible-collections/community.general/pull/3228).
- - tss lookup plugin - refactored to decouple the supporting third-party library
- (``python-tss-sdk``) (https://github.com/ansible-collections/community.general/pull/3252).
- - ufw - if ``delete=true`` and ``insert`` option is present, then ``insert``
- is now ignored rather than failing with a syntax error (https://github.com/ansible-collections/community.general/pull/3514).
- - vdo - minor refactoring of the code (https://github.com/ansible-collections/community.general/pull/3191).
- - zfs - added diff mode support (https://github.com/ansible-collections/community.general/pull/502).
- - zfs_delegate_admin - drop choices from permissions, allowing any permission
- supported by the underlying zfs commands (https://github.com/ansible-collections/community.general/pull/2540).
- - zone connection - minor refactor to make lints and IDEs happy (https://github.com/ansible-collections/community.general/pull/2520).
- - zpool_facts - minor refactoring (https://github.com/ansible-collections/community.general/pull/3332).
- - zypper - prefix zypper commands with ``/sbin/transactional-update --continue
- --drop-if-no-change --quiet run`` if transactional updates are detected (https://github.com/ansible-collections/community.general/issues/3159).
- release_summary: This is release 4.0.0 of ``community.general``, released on
- 2021-11-02.
- removed_features:
- - All inventory and vault scripts contained in community.general were moved
- to the `contrib-scripts GitHub repository <https://github.com/ansible-community/contrib-scripts>`_
- (https://github.com/ansible-collections/community.general/pull/2696).
- - ModuleHelper module utils - remove fallback when value could not be determined
- for a parameter (https://github.com/ansible-collections/community.general/pull/3461).
- - Removed deprecated netapp module utils and doc fragments (https://github.com/ansible-collections/community.general/pull/3197).
- - The nios, nios_next_ip, nios_next_network lookup plugins, the nios documentation
- fragment, and the nios_host_record, nios_ptr_record, nios_mx_record, nios_fixed_address,
- nios_zone, nios_member, nios_a_record, nios_aaaa_record, nios_network, nios_dns_view,
- nios_txt_record, nios_naptr_record, nios_srv_record, nios_cname_record, nios_nsgroup,
- and nios_network_view modules have been removed from community.general 4.0.0
- and were replaced by redirects to the `infoblox.nios_modules <https://galaxy.ansible.com/infoblox/nios_modules>`_
- collection. Please install the ``infoblox.nios_modules`` collection to continue
- using these plugins and modules, and update your FQCNs (https://github.com/ansible-collections/community.general/pull/3592).
- - The vendored copy of ``ipaddress`` has been removed. Please use ``ipaddress``
- from the Python 3 standard library, or `from pypi <https://pypi.org/project/ipaddress/>`_.
- (https://github.com/ansible-collections/community.general/pull/2441).
- - cpanm - removed the deprecated ``system_lib`` option. Use Ansible's privilege
- escalation mechanism instead; the option basically used ``sudo`` (https://github.com/ansible-collections/community.general/pull/3461).
- - grove - removed the deprecated alias ``message`` of the ``message_content``
- option (https://github.com/ansible-collections/community.general/pull/3461).
- - proxmox - default value of ``proxmox_default_behavior`` changed to ``no_defaults``
- (https://github.com/ansible-collections/community.general/pull/3461).
- - proxmox_kvm - default value of ``proxmox_default_behavior`` changed to ``no_defaults``
- (https://github.com/ansible-collections/community.general/pull/3461).
- - runit - removed the deprecated ``dist`` option which was not used by the module
- (https://github.com/ansible-collections/community.general/pull/3461).
- - telegram - removed the deprecated ``msg``, ``msg_format`` and ``chat_id``
- options (https://github.com/ansible-collections/community.general/pull/3461).
- - xfconf - the default value of ``disable_facts`` changed to ``true``, and the
- value ``false`` is no longer allowed. Register the module results instead
- (https://github.com/ansible-collections/community.general/pull/3461).
- security_fixes:
- - nmcli - do not pass WiFi secrets on the ``nmcli`` command line. Use ``nmcli
- con edit`` instead and pass secrets as ``stdin`` (https://github.com/ansible-collections/community.general/issues/3145).
- fragments:
- - 1085-consul-acl-hcl-whitelist-update.yml
- - 1334-jenkins-plugin-fallback-urls.yaml
- - 1942_timezone.yml
- - 2045-bitbucket_support_basic_auth.yaml
- - 2126-consul_kv-pass-token.yml
- - 2284-influxdb_retention_policy-fix_duration_parsing.yml
- - 2323-groupby_as_dict-filter.yml
- - 2334-redfish_config-skip-incorrect-attributes.yml
- - 2337-mark-inventory-scripts-executable.yml
- - 2348-composer-no-interaction-option-discovery-to-avoid-hang.yaml
- - 2355-spotinst_aws_elastigroup-list-elements.yml
- - 2364-influxdb_user-first_user.yml
- - 2369-lvol_size_bug_fixes.yml
- - 2373-svr4pkg-fix-typeerror.yml
- - 2383-influxdb_retention_policy-add-state-option.yml
- - 2393-module_helper-breakdown.yml
- - 2407-puppet-change_stdout_to_console.yaml
- - 2409-nmcli_add_slave-type_bridge_to_nmcli_command_if_type_is_bridge-slave.yml
- - 2410-linode-improvements.yml
- - 2411-snap-revamp-enabled-disabled-states.yml
- - 2416-nmcli_compare_mac_addresses_case_insensitively.yml
- - 2417-nmcli_remove_dead_code.yml
- - 2430-linodev4-error-message.yml
- - 2435-one_vm-fix_missing_keys.yml
- - 2448-stackpath_compute-fix.yml
- - 2450-gitlab_user-add_expires_at_option.yaml
- - 2454-detect_zfs_changed.yml
- - 2461-ovirt4-fix-configparser.yml
- - 2472_filesystem_module_revamp.yml
- - 2485-java_keystore-ssl_backend-parameter.yml
- - 2499-influxdb_user-fix-multiple-no-privileges.yml
- - 2500-passwordstore-add_option_ignore_missing.yml
- - 2510-jenkins_plugin_use_post_method.yml
- - 2514-mh-improved-changed.yml
- - 2516_fix_2515_keystore_type_jks.yml
- - 2517-cmd-params-from-vars.yml
- - 2518-nmap-fix-cache-disabled.yml
- - 2520-connection-refactors.yml
- - 2521-flatpak-list.yml
- - 2524-pacman_add_bin_option.yml
- - 2525-iptables_state-fix-initialization-command.yml
- - 2526-java_keystore-password-via-stdin.yml
- - 2540-zfs-delegate-choices.yml
- - 2556-add-comment_visibility-parameter-for-comment-operation-of-jira-module.yml
- - 2557-cloud-misc-refactor.yml
- - 2560-java_cert-pkcs12-alias-bugfix.yml
- - 2564-mh-cmd-process-output.yml
- - 2568-ssh_config-reduce-stormssh-searches-based-on-host.yml
- - 2571-rhsm_release-fix-release_matcher.yaml
- - 2573-terraform-overwrite-init.yml
- - 2578-ini-file-utf8-bom.yml
- - 2579-redis-cache-ipv6.yml
- - 2590-netcup_dns-exception-no-message-attr.yml
- - 2614-influxdb_user-fix-issue-introduced-in-PR#2499.yml
- - 2616-archive-exclusion_patterns-option.yml
- - 2632-cleanup.yml
- - 2634-terraform-switch-workspace.yml
- - 2635-nmcli-add-ignore-auto-arguments.yml
- - 2648-proxmox_kvm-fix-vmid-return-value.yml
- - 2650-composer-add_composer_executable.yml
- - 2661-maven_artifact-add-sha1-option.yml
- - 2671-fix-broken-query-of-async_status-result.yml
- - 2681-stacki-host-bugfix.yml
- - 2684-open_iscsi-single-target-multiple-portal-overrides.yml
- - 2691-gitlab_user-support-identity-provider.yml
- - 2692-logstash-callback-plugin-replacing_options.yml
- - 2711-fix-iptables_state-2700-async_status-call.yml
- - 2722-zypper_repository-fix_idempotency_on_adding_repo_with_releasever.yml
- - 273-add_multiple_options_with_same_name_to_ini_file.yml
- - 2731-mh-cmd-locale.yml
- - 2732-nmcli_add_options.yml
- - 2735-onepassword-add_domain_option.yml
- - 2751-flatpak-no_dependencies.yml
- - 2771-scaleway_inventory_json_accept_byte_array.yml
- - 2774-datadog_event_api_parameter.yml
- - 2779_redhat_subscription-add_server_prefix_and_server_port.yml
- - 2787-yum_versionlock-fix_idempotency_when_using_wildcard.yml
- - 2790-callback_splunk-batch-option.yml
- - 2808-pids-older-psutil.yml
- - 2816-archive-refactor.yml
- - 2821-ipa_sudorule.yml
- - 2824-gitlab_project-project-under-user.yml
- - 2827-nmcli_fix_team_slave.yml
- - 2830-npm-version-update.yml
- - 2841-proxmox_kvm_zfs_devstr.yml
- - 2843-modprobe-failure-conditions.yml
- - 2844-ali_instance_info-deprecate-params.yml
- - 2845-serverless-deprecate-functions-param.yml
- - 2850-jenkins_build-support-stop-jenkins-build.yml
- - 2867-redis-terminology.yml
- - 2874-terraform-check-destroy.yml
- - 2875-ini_file-unicode.yml
- - 2878-validate-certs-bool.yml
- - 2881-gitlab_project-fix_workspace_user.yaml
- - 2883-_mount-fixed-sanity-checks.yml
- - 2901-nmcli_teaming.yml
- - 2902-filesystem_extend_freebsd_support.yml
- - 2904-fix-bug-when-2-identical-executions-in-same-auth-flow.yml
- - 2912-snap-module-helper.yml
- - 2913-archive-dest_state.yml
- - 2918-snap-param-order.yml
- - 2922-mh-cmd-output-feature-flag.yml
- - 2923-archive-remove-bugfix.yml
- - 2924-npm-fix-package-json.yml
- - 2935-lvol-support_check_mode_thinpool.yml
- - 2936-pacman-fix_changed_status_when_ignorepkg_has_been_defined.yml
- - 2946-python-dnsimple-v2-rewrite.yml
- - 2948-jenkins_job_info-remove_necessities_on_password_or_token.yml
- - 2949-add_authentication-flow-binding_keycloak-client.yml
- - 2951-mh-vars-deepcopy.yml
- - 2955-rax_mon_notification_plan-added-elements-to-list-params.yaml
- - 2958-datadog_monitor_support_composites.yml
- - 2960-launchd-validation-check.yaml
- - 2963-improve-diff-mode-on-keycloak_authentication.yml
- - 2967-proxmox_inventory-offline-node-fix.yml
- - 2987-archive-stage-idempotency-fix.yml
- - 2989-pamd-single-line.yaml
- - 3001-enhance_gitlab_module.yml
- - 3006-redfish_command-bootoverride-argument-check.yaml
- - 3028-snap-channel.yml
- - 3034-promox-kvm-return-new-id.yaml
- - 3036-archive-root-path-fix.yml
- - 3038-enhance_github_repo_api_url.yml
- - 3041-fix_gitlab_group_members_gitlab_project_mambers.yml
- - 3041-gitlab_x_members_fix_and_enhancement.yml
- - 3044-proxmox-inventory-snapshots.yml
- - 3049-xfconf-deprecate-get.yaml
- - 3052_proxmox_inventory_plugin.yml
- - 3067-taiga-bugfix.yaml
- - 3068-supervisorctl-bugfix.yaml
- - 3074-ini_file-3031-empty-value-inconsistency.yml
- - 3075-archive-idempotency-enhancements.yml
- - 3079-report-power-state-hpilo.yaml
- - 3080-java_cert-2460-import_private_key.yml
- - 3081-add-wifi-option-to-nmcli-module.yml
- - 3084-info-checkmode.yaml
- - 3092-gunicorn-refactor.yaml
- - 3093-ejabberd_user-refactor.yaml
- - 3098-django_manage-cmd-list.yaml
- - 3104-deploy_helper-required_if.yaml
- - 3106-apache2_module-review.yaml
- - 3125-hana-query-userstore.yaml
- - 3132-nmcli-dummy.yaml
- - 3135-add-redfish_command-bootoverridemode.yaml
- - 3136-add-wifi-sec-change-detection-to-nmcli-module.yml
- - 3139-tss-lookup-plugin-update-to-make-compatible-with-sdk-v1.yml
- - 3141-disallow-options-unsupported-by-nmcli.yml
- - 3160-pass-wifi-secrets-via-stdin-to-nmcli-module.yml
- - 3161-openbsd-pkg-fix-regexp-matching-crash.yml
- - 3164-zypper-support-transactional-updates.yaml
- - 3178-add-ipaselinuxusermaporder-to-ipa-config-module.yml
- - 3191-vdo-refactor.yml
- - 3194-sanity.yml
- - 3199-tss-lookup-plugin-bugfix-for-backwards-compatibility.yml
- - 3203-linode-inventory-return-full-api-ip-data.yml
- - 3205-slack-minor-refactor.yaml
- - 3206-mh-classmethod.yaml
- - 3211-snap-error-handling.yml
- - 3228-tss-domain-authorization.yml
- - 3231-fix-keycloak-realm-events.yml
- - 3233-include-thermal-sensor-status-via-redfish_info.yaml
- - 3237-copr-fix_chroot_naming.yml
- - 3239-nmcli-sit-ipip-config-bugfix.yaml
- - 3247-retry_servfail-for-dig.yaml
- - 3248-adds-few-more-gitlab-group-options.yml
- - 3250-parse-scw-config.yml
- - 3252-tss_lookup_plugin-refactor.yml
- - 3256-fix-ptr-handling-in-udm_dns_record.yml
- - 3258-apache2_module.yml
- - 3262-nmcli-add-gre-tunnel-support.yaml
- - 3266-vmid-existing-target-clone.yml
- - 3267-dnsimple1-deprecation.yml
- - 3280-keycloak-module-cleanup-and-consistency.yml
- - 3283-django_manage-fix-command-splitting.yaml
- - 3284-openwrt_init-improvements.yaml
- - 3285-pamd-updated-with-empty-args.yaml
- - 3286-open_iscsi-improvements.yaml
- - 3290-mh-cmd-boolean-not.yaml
- - 3296-clean-etag.yaml
- - 3313-nmcli-add_gsm_support.yml
- - 3315-pids-refactor.yml
- - 3319-gitlab_project_members_enhancement.yml
- - 3327-tss-token-authorization.yml
- - 3328-interfaces_file-improvements.yaml
- - 3329-kernel_blacklist-improvements.yaml
- - 3330-bugfix-keycloak-authentication-flow-requirements-not-set-correctly.yml.yml
- - 3331-do_not_ignore_volatile_configs_by_option.yml
- - 3332-zpool_facts-pythonify.yaml
- - 3334-django_manage-split-params.yaml
- - 3336-openbsd_pkg-fix-KeyError.yml
- - 3337-linode-fix.yml
- - 3343-redfish_utils-addUser-userId.yml
- - 3359-add-unicode_normalize-filter.yml
- - 3367-add-require_two_factor_authentication-property-to-gitlab-group.yml
- - 3379-gitlab_project-ci_cd_properties.yml
- - 3393-pkgng-many_packages_one_command.yml
- - 3400-fix-gitLab-api-searches-always-return-first-found-match-3386.yml
- - 3401-nmcli-needs-type.yml
- - 3404-redfish_utils-skip-manager-network-check.yml
- - 3422-open-iscsi-mutual-authentication-support.yaml
- - 3425-mail_add_configurable_ehlo_hostname.yml
- - 3426-copy-permissions-along-with-file-for-jboss-module.yml
- - 3429-enable_deprecaded_message_for_ignore_volatile_option.yml
- - 3450-callback_opentelemetry-exception_handling.yml
- - 3451-gitlab-group-member-deprecate-name-and-path.yml
- - 3453-fix-gitlab_group-require_two_factor_authentication-cant_be_null.yml
- - 3461-remove-deprecations-for-4.0.0.yml
- - 3473-gitlab_deploy_key-fix_idempotency.yml
- - 3474-zypper_repository_improve_repo_file_idempotency.yml
- - 3478-yaml-callback.yml
- - 3495-ssh_config_add_forwardagent_option.yml
- - 3496-callback_opentelemetry-enrich_stacktraces.yml
- - 3498-callback_opentelemetry-only_in_ci.yml
- - 3500-macports-add-stdout-and-stderr-to-status.yaml
- - 3509-redfish_utils-SetOneTimeBoot-mode-fix.yml
- - 3514-ufw_insert_or_delete_biased_when_deletion_enabled.yml
- - 3526-pkgng-add-integration-tests.yml
- - 3536-quote-role-name-in-url.yml
- - 3538-fix-keycloak-idp-mappers-change-detection.yml
- - 3540-terraform_add_parallelism_parameter.yml
- - 3545-ipa_group-add-append-option.yml
- - 3551-supervisor-all.yml
- - 3554-opkg-name.yml
- - 3556-callback_elastic-enrich_stacktraces.yml
- - 3558-callback_opentelemetry-enrich_service_map.yml
- - 3561-fix-ipa-host-var-detection.yml
- - 3563-nmcli-ipv6_dns.yaml
- - 3564-callback_opentelemetry-redacted_user_pass_from_url_args.yml
- - 3583-fix-pkgin-exception.yml
- - 3599-callback_opentelemetry-enriched_errors_in_loops.yml
- - 3602-fix-gitlab_project_members-improve-search-method.yml
- - 3606-pacman-speed-up-check-if-package-is-installed.yml
- - 3610-fix-keycloak-client-diff-bugs-when-sorting.yml
- - 3611-pipx-fix-inject.yml
- - 3622-fix-gitlab-deploy-key-check-mode.yml
- - 3626-fix-one_image-error.yml
- - 3634-pipx-improve-changed.yaml
- - 3648-mh-cmd-publish-cmd.yaml
- - 3649-proxmox_group_info_TypeError.yml
- - 3655-use-publish_cmd.yaml
- - 4.0.0.yml
- - 502-zfs_bugfix_and_diff_mode_support.yaml
- - 634-gitlab_project_runners.yaml
- - a_module-test.yml
- - ansible-core-_text.yml
- - gem_module_add_bindir_option.yml
- - ipaddress.yml
- - json_query_more_types.yml
- - keycloak-realm-no-log-password-reset.yml
- - keycloak_realm_ssl_required.yml
- - netapp-removal.yml
- - nios-removal.yml
- - pkgin-output-after-error.yml
- - remove-scripts.yml
- modules:
- - description: Install Ansible roles or collections using ansible-galaxy
- name: ansible_galaxy_install
- namespace: packaging.language
- - description: Send Discord messages
- name: discord
- namespace: notification
- - description: Locks package versions in C(dnf) based systems
- name: dnf_versionlock
- namespace: packaging.os
- - description: Mark or unmark existing branches for protection
- name: gitlab_protected_branch
- namespace: source_control.gitlab
- - description: Execute SQL on HANA
- name: hana_query
- namespace: database.saphana
- - description: Configure authentication in Keycloak
- name: keycloak_authentication
- namespace: identity.keycloak
- - description: Allows administration of Keycloak client_rolemapping with the Keycloak
- API
- name: keycloak_client_rolemapping
- namespace: identity.keycloak
- - description: Allows administration of Keycloak client_scopes via Keycloak API
- name: keycloak_clientscope
- namespace: identity.keycloak
- - description: Allows administration of Keycloak identity providers via Keycloak
- API
- name: keycloak_identity_provider
- namespace: identity.keycloak
- - description: Allows administration of Keycloak roles via Keycloak API
- name: keycloak_role
- namespace: identity.keycloak
- - description: Allows administration of Keycloak user federations via Keycloak
- API
- name: keycloak_user_federation
- namespace: identity.keycloak
- - description: Execute SQL scripts on a MSSQL database
- name: mssql_script
- namespace: database.mssql
- - description: Manage pacman's list of trusted keys
- name: pacman_key
- namespace: packaging.os
- - description: Manages applications installed with pipx
- name: pipx
- namespace: packaging.language
- - description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster.
- name: proxmox_nic
- namespace: cloud.misc
- - description: Retrieve information about one or more Proxmox VE tasks
- name: proxmox_tasks_info
- namespace: cloud.misc
- - description: Set key value pairs in Redis
- name: redis_data
- namespace: database.misc
- - description: Increment keys in Redis
- name: redis_data_incr
- namespace: database.misc
- - description: Get value of key in Redis database
- name: redis_data_info
- namespace: database.misc
- - description: Query executions for a Rundeck job
- name: rundeck_job_executions_info
- namespace: web_infrastructure
- - description: Run a Rundeck job
- name: rundeck_job_run
- namespace: web_infrastructure
- - description: Perform SAP Task list execution
- name: sap_task_list_execute
- namespace: system
- - description: Manages SAP SAPCAR archives
- name: sapcar_extract
- namespace: files
- - description: Manages snap aliases
- name: snap_alias
- namespace: packaging.os
- - description: Retrieve XFCE4 configurations
- name: xfconf_info
- namespace: system
- plugins:
- callback:
- - description: Create distributed traces for each Ansible task in Elastic APM
- name: elastic
- namespace: null
- - description: Create distributed traces with OpenTelemetry
- name: opentelemetry
- namespace: null
- filter:
- - description: Transform a sequence of dictionaries to a dictionary where the
- dictionaries are indexed by an attribute
- name: groupby_as_dict
- namespace: null
- - description: Normalizes unicode strings to facilitate comparison of characters
- with normalized forms
- name: unicode_normalize
- namespace: null
- inventory:
- - description: Icinga2 inventory source
- name: icinga2
- namespace: null
- - description: OpenNebula inventory source
- name: opennebula
- namespace: null
- lookup:
- - description: Retrieves the version of an installed collection
- name: collection_version
- namespace: null
- - description: Composes a list with nested elements of other lists or dicts
- which can depend on previous loop variables
- name: dependent
- namespace: null
- - description: Generates random pet names
- name: random_pet
- namespace: null
- - description: Generates a random string
- name: random_string
- namespace: null
- - description: Return a number of random words
- name: random_words
- namespace: null
- test:
- - description: Check whether the given string refers to an available module
- or action plugin
- name: a_module
- namespace: null
- release_date: '2021-11-02'
- 4.0.1:
- changes:
- bugfixes:
- - a_module test plugin - fix crash when testing a module name that was tombstoned
- (https://github.com/ansible-collections/community.general/pull/3660).
- - xattr - fix exception caused by ``_run_xattr()`` raising a ``ValueError``
- due to mishandling of a base64-encoded value (https://github.com/ansible-collections/community.general/issues/3673).
- release_summary: Bugfix release for today's Ansible 5.0.0 beta 1.
- fragments:
- - 3660-a_module-tombstone.yml
- - 3675-xattr-handle-base64-values.yml
- - 4.0.1.yml
- release_date: '2021-11-09'
- 4.0.2:
- changes:
- bugfixes:
- - counter_enabled callback plugin - fix output to correctly display host and
- task counters in serial mode (https://github.com/ansible-collections/community.general/pull/3709).
- - ldap_search - allow it to be used even in check mode (https://github.com/ansible-collections/community.general/issues/3619).
- - lvol - allows logical volumes to be created with certain size arguments prefixed
- with ``+`` to preserve behavior of older versions of this module (https://github.com/ansible-collections/community.general/issues/3665).
- - nmcli - fixed falsely reported changed status when ``mtu`` is omitted with
- ``dummy`` connections (https://github.com/ansible-collections/community.general/issues/3612,
- https://github.com/ansible-collections/community.general/pull/3625).
- deprecated_features:
- - Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed
- in the next major release (community.general 5.0.0) next spring. While most
- content will probably still work with ansible-base 2.10, we will remove symbolic
- links for modules and action plugins, which will make it impossible to use
- them with Ansible 2.9 anymore. Please use community.general 4.x.y with Ansible
- 2.9 and ansible-base 2.10, as these releases will continue to support Ansible
- 2.9 and ansible-base 2.10 even after they are End of Life (https://github.com/ansible-community/community-topics/issues/50,
- https://github.com/ansible-collections/community.general/pull/3723).
- release_summary: Bugfix release for today's Ansible 5.0.0 beta 2.
- fragments:
- - 3625-nmcli_false_changed_mtu_fix.yml
- - 3667-ldap_search.yml
- - 3681-lvol-fix-create.yml
- - 3709-support-batch-mode.yml
- - 4.0.2.yml
- - deprecate-ansible-2.9-2.10.yml
- release_date: '2021-11-16'
- 4.1.0:
- changes:
- bugfixes:
- - github_repo - ``private`` and ``description`` attributes should not be set
- to default values when the repo already exists (https://github.com/ansible-collections/community.general/pull/2386).
- - terraform - fix command options, such as ``lock`` or ``lock_timeout``, being
- ignored during the plan stage in function ``build_plan`` (https://github.com/ansible-collections/community.general/issues/3707,
- https://github.com/ansible-collections/community.general/pull/3726).
- minor_changes:
- - gitlab - clean up modules and utils (https://github.com/ansible-collections/community.general/pull/3694).
- - ipmi_boot - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
- - ipmi_power - add support for user-specified IPMI encryption key (https://github.com/ansible-collections/community.general/issues/3698).
- - listen_ports_facts - add support for ``ss`` command besides ``netstat`` (https://github.com/ansible-collections/community.general/pull/3708).
- - lxd_container - adds ``type`` option, which also allows operating on virtual
- machines and not just containers (https://github.com/ansible-collections/community.general/pull/3661).
- - nmcli - add multiple addresses support for ``ip4`` parameter (https://github.com/ansible-collections/community.general/issues/1088,
- https://github.com/ansible-collections/community.general/pull/3738).
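- # Editor's sketch (not part of the upstream changelog): passing a list of
- # addresses to ``ip4``; connection name and addresses are placeholders.
- #
- #   - community.general.nmcli:
- #       conn_name: eth0
- #       type: ethernet
- #       ip4:
- #         - 192.0.2.10/24
- #         - 192.0.2.11/24
- #       state: present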
- - open_iscsi - extended module to allow rescanning of an established session for
- one or all targets (https://github.com/ansible-collections/community.general/issues/3763).
- - pacman - add ``stdout`` and ``stderr`` as return values (https://github.com/ansible-collections/community.general/pull/3758).
- - redfish_command - add ``GetHostInterfaces`` command to enable reporting Redfish
- Host Interface information (https://github.com/ansible-collections/community.general/issues/3693).
- - redfish_command - add ``SetHostInterface`` command to enable configuring the
- Redfish Host Interface (https://github.com/ansible-collections/community.general/issues/3632).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 1088-nmcli_add_multiple_addresses_support.yml
- - 2386-github_repo-fix-idempotency-issues.yml
- - 3632-add-redfish-host-interface-config-support.yml
- - 3661-lxd_container-add-vm-support.yml
- - 3693-add-redfish-host-interface-info-support.yml
- - 3694-gitlab-cleanup.yml
- - 3702-ipmi-encryption-key.yml
- - 3708-listen_ports_facts-add-ss-support.yml
- - 3726-terraform-missing-parameters-planned-fix.yml
- - 3758-pacman-add-stdout-stderr.yml
- - 3765-extend-open_iscsi-with-rescan.yml
- - 4.1.0.yml
- plugins:
- inventory:
- - description: Xen Orchestra inventory source
- name: xen_orchestra
- namespace: null
- lookup:
- - description: Get secrets from RevBits PAM server
- name: revbitspss
- namespace: null
- release_date: '2021-11-23'
- 4.2.0:
- changes:
- bugfixes:
- - icinga2 inventory plugin - handle 404 error when filter produces no results
- (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
- - interfaces_file - fixed the check for existing option in interface (https://github.com/ansible-collections/community.general/issues/3841).
- - jira - fixed bug where module returns error related to dictionary key ``body``
- (https://github.com/ansible-collections/community.general/issues/3419).
- - nmcli - fix returning "changed" when no mask set for IPv4 or IPv6 addresses
- on task rerun (https://github.com/ansible-collections/community.general/issues/3768).
- - nmcli - pass ``flags``, ``ingress``, ``egress`` params to ``nmcli`` (https://github.com/ansible-collections/community.general/issues/1086).
- - nrdp callback plugin - fix error ``string arguments without an encoding``
- (https://github.com/ansible-collections/community.general/issues/3903).
- - opentelemetry_plugin - honour ``ignore_errors`` when a task has failed instead
- of reporting an error (https://github.com/ansible-collections/community.general/pull/3837).
- - pipx - passes the correct command line option ``--include-apps`` (https://github.com/ansible-collections/community.general/issues/3791).
- - proxmox - fixed ``onboot`` parameter causing module failures when undefined
- (https://github.com/ansible-collections/community.general/issues/3844).
- - python_requirements_info - fails if a version operator is used without a version
- (https://github.com/ansible-collections/community.general/pull/3785).
- deprecated_features:
- - module_helper module utils - deprecated the attribute ``ModuleHelper.VarDict``
- (https://github.com/ansible-collections/community.general/pull/3801).
- minor_changes:
- - aix_filesystem - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3833).
- - aix_lvg - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3834).
- - gitlab - add more token authentication support with the new options ``api_oauth_token``
- and ``api_job_token`` (https://github.com/ansible-collections/community.general/issues/705).
- - gitlab_group, gitlab_project - add new option ``avatar_path`` (https://github.com/ansible-collections/community.general/pull/3792).
- - gitlab_project - add new option ``default_branch`` to gitlab_project (if ``readme
- = true``) (https://github.com/ansible-collections/community.general/pull/3792).
- - hponcfg - revamped module using ModuleHelper (https://github.com/ansible-collections/community.general/pull/3840).
- - icinga2 inventory plugin - added the ``display_name`` field to variables (https://github.com/ansible-collections/community.general/issues/3875,
- https://github.com/ansible-collections/community.general/pull/3906).
- - icinga2 inventory plugin - inventory object names can be changed to the host
- object name, address, or display_name field using ``inventory_attr`` in your
- config file (https://github.com/ansible-collections/community.general/issues/3875, https://github.com/ansible-collections/community.general/pull/3906).
- - ip_netns - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3822).
- - iso_extract - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3805).
- - java_cert - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3835).
- - jira - add support for Bearer token auth (https://github.com/ansible-collections/community.general/pull/3838).
- - keycloak_user_federation - add sssd user federation support (https://github.com/ansible-collections/community.general/issues/3767).
- - logentries - calling ``run_command`` with arguments as ``list`` instead of
- ``str`` (https://github.com/ansible-collections/community.general/pull/3807).
- - logstash_plugin - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3808).
- - lxc_container - calling ``run_command`` with arguments as ``list`` instead
- of ``str`` (https://github.com/ansible-collections/community.general/pull/3851).
- - lxd connection plugin - make sure that ``ansible_lxd_host``, ``ansible_executable``,
- and ``ansible_lxd_executable`` work (https://github.com/ansible-collections/community.general/pull/3798).
- - lxd inventory plugin - support virtual machines (https://github.com/ansible-collections/community.general/pull/3519).
- - module_helper module utils - added decorators ``check_mode_skip`` and ``check_mode_skip_returns``
- for skipping methods when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/3849).
- - monit - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3821).
- - nmcli - add multiple addresses support for ``ip6`` parameter (https://github.com/ansible-collections/community.general/issues/1088).
- - nmcli - add support for ``eui64`` and ``ipv6privacy`` parameters (https://github.com/ansible-collections/community.general/issues/3357).
- - python_requirements_info - returns python version broken down into its components,
- and some minor refactoring (https://github.com/ansible-collections/community.general/pull/3797).
- - svc - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3829).
- - xattr - calling ``run_command`` with arguments as ``list`` instead of ``str``
- (https://github.com/ansible-collections/community.general/pull/3806).
- - xfconf - minor refactor on the base class for the module (https://github.com/ansible-collections/community.general/pull/3919).
- release_summary: Regular bugfix and feature release.
- fragments:
- - 1088-add_multiple_ipv6_address_support.yml
- - 3357-nmcli-eui64-and-ipv6privacy.yml
- - 3519-inventory-support-lxd-4.yml
- - 3768-nmcli_fix_changed_when_no_mask_set.yml
- - 3780-add-keycloak-sssd-user-federation.yml
- - 3785-python_requirements_info-versionless-op.yaml
- - 3792-improve_gitlab_group_and_project.yml
- - 3797-python_requirements_info-improvements.yaml
- - 3798-fix-lxd-connection-option-vars-support.yml
- - 3800-pipx-include-apps.yaml
- - 3801-mh-deprecate-vardict-attr.yaml
- - 3805-iso_extract-run_command-list.yaml
- - 3806-xattr-run_command-list.yaml
- - 3807-logentries-run_command-list.yaml
- - 3808-logstash_plugin-run_command-list.yaml
- - 3821-monit-run-list.yaml
- - 3822-ip_netns-run-list.yaml
- - 3829-svc-run-list.yaml
- - 3833-aix_filesystem-run-list.yaml
- - 3834-aix-lvg-run-list.yaml
- - 3835-java-cert-run-list.yaml
- - 3837-opentelemetry_plugin-honour_ignore_errors.yaml
- - 3838-jira-token.yaml
- - 3840-hponcfg-mh-revamp.yaml
- - 3849-mh-check-mode-decos.yaml
- - 3851-lxc-container-run-list.yaml
- - 3862-interfaces-file-fix-dup-option.yaml
- - 3867-jira-fix-body.yaml
- - 3874-proxmox-fix-onboot-param.yml
- - 3875-icinga2-inv-fix.yml
- - 3896-nmcli_vlan_missing_options.yaml
- - 3909-nrdp_fix_string_args_without_encoding.yaml
- - 3919-xfconf-baseclass.yaml
- - 4.2.0.yml
- - 705-gitlab-auth-support.yml
- modules:
- - description: Pull basic info from DNSimple API
- name: dnsimple_info
- namespace: net_tools
- - description: Create or delete a branch
- name: gitlab_branch
- namespace: source_control.gitlab
- - description: Sets or updates configuration attributes on HPE iLO with Redfish
- OEM extensions
- name: ilo_redfish_config
- namespace: remote_management.redfish
- - description: Gathers server information through iLO using Redfish APIs
- name: ilo_redfish_info
- namespace: remote_management.redfish
- release_date: '2021-12-21'
- 4.3.0:
- changes:
- bugfixes:
- - Various modules and plugins - use vendored version of ``distutils.version``
- instead of the deprecated Python standard library ``distutils`` (https://github.com/ansible-collections/community.general/pull/3936).
- - alternatives - fix output parsing for alternatives groups (https://github.com/ansible-collections/community.general/pull/3976).
- - jail connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
- - lxd connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the ``lxc`` executable (https://github.com/ansible-collections/community.general/pull/3934).
- - passwordstore lookup plugin - replace deprecated ``distutils.util.strtobool``
- with Ansible's ``convert_bool.boolean`` to interpret values for the ``create``,
- ``returnall``, ``overwrite``, ``backup``, and ``nosymbols`` options (https://github.com/ansible-collections/community.general/pull/3934).
- - say callback plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the ``say`` resp. ``espeak`` executables
- (https://github.com/ansible-collections/community.general/pull/3934).
- - scaleway_user_data - fix a double-quote added to user data in Scaleway's server
- where none is needed (``Content-type`` -> ``Content-Type``) (https://github.com/ansible-collections/community.general/pull/3940).
- - slack - add ``charset`` to HTTP headers to avoid Slack API warning (https://github.com/ansible-collections/community.general/issues/3932).
- - zone connection plugin - replace deprecated ``distutils.spawn.find_executable``
- with Ansible's ``get_bin_path`` to find the executable (https://github.com/ansible-collections/community.general/pull/3934).
- minor_changes:
- - ipa_dnszone - ``dynamicupdate`` is now a boolean parameter, instead of a string
- parameter accepting ``"true"`` and ``"false"``. Also the module is now idempotent
- with respect to ``dynamicupdate`` (https://github.com/ansible-collections/community.general/pull/3374).
- - ipa_dnszone - add DNS zone synchronization support (https://github.com/ansible-collections/community.general/pull/3374).
- - ipmi_power - add ``machine`` option to ensure the power state via the remote
- target address (https://github.com/ansible-collections/community.general/pull/3968).
- - mattermost - add the possibility to send attachments instead of text messages
- (https://github.com/ansible-collections/community.general/pull/3946).
- - nmcli - add ``wireguard`` connection type (https://github.com/ansible-collections/community.general/pull/3985).
- - proxmox - add ``clone`` parameter (https://github.com/ansible-collections/community.general/pull/3930).
- - puppet - remove deprecation for ``show_diff`` parameter. Its alias ``show-diff``
- is still deprecated and will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/3980).
- - scaleway_compute - add the possibility to use a project identifier (new ``project``
- option) instead of the deprecated organization identifier (https://github.com/ansible-collections/community.general/pull/3951).
- - scaleway_volume - all volumes are systematically created on par1 (https://github.com/ansible-collections/community.general/pull/3964).
- release_summary: Regular feature and bugfix release.
- fragments:
- - 3374-add-ipa-ptr-sync-support.yml
- - 3921-add-counter-filter-plugin.yml
- - 3930-proxmox-add-clone.yaml
- - 3933-slack-charset-header.yaml
- - 3934-distutils.yml
- - 3936-distutils.version.yml
- - 3940_fix_contenttype_scaleway_user_data.yml
- - 3946-mattermost_attachments.yml
- - 3951-scaleway_compute_add_project_id.yml
- - 3964-scaleway_volume_add_region.yml
- - 3968-ipmi_power-add-machine-option.yaml
- - 3976-fix-alternatives-parsing.yml
- - 3980-puppet-show_diff.yml
- - 3985-nmcli-add-wireguard-connection-type.yml
- - 4.3.0.yml
- modules:
- - description: Manage Rust packages with cargo
- name: cargo
- namespace: packaging.language
- - description: Allows obtaining Keycloak realm public information via Keycloak
- API
- name: keycloak_realm_info
- namespace: identity.keycloak
- - description: Manage sudoers files
- name: sudoers
- namespace: system
- plugins:
- filter:
- - description: Counts hashable elements in a sequence
- name: counter
- namespace: null
- release_date: '2022-01-11'
- 4.4.0:
- changes:
- bugfixes:
- - cargo - fix detection of outdated packages when ``state=latest`` (https://github.com/ansible-collections/community.general/pull/4052).
- - cargo - fix incorrectly reported changed status for packages with a name containing
- a hyphen (https://github.com/ansible-collections/community.general/issues/4044,
- https://github.com/ansible-collections/community.general/pull/4052).
- - gitlab_project_variable - add missing documentation about GitLab versions
- that support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/issues/4038).
- - 'gitlab_project_variable - allow setting the same variable name under different
- environment scopes. Due to this change, the return value ``project_variable``
- differs from the previous version in check mode. It was counting ``updated`` values
- because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/issues/4038).
-
- '
- - gitlab_project_variable - fix idempotent change behaviour for float and integer
- variables (https://github.com/ansible-collections/community.general/issues/4038).
- - gitlab_runner - use correct API endpoint to create and retrieve project level
- runners when using ``project`` (https://github.com/ansible-collections/community.general/pull/3965).
- - listen_ports_facts - the local port regex was not handling IPv6-only bindings
- well. Fixes the regex for ``ss`` (https://github.com/ansible-collections/community.general/pull/4092).
- - mail callback plugin - fix crash on Python 3 (https://github.com/ansible-collections/community.general/issues/4025,
- https://github.com/ansible-collections/community.general/pull/4026).
- - 'opentelemetry - fix generating a trace with a task containing ``no_log: true``
- (https://github.com/ansible-collections/community.general/pull/4043).'
- - python_requirements_info - store ``mismatched`` return values per package
- as documented in the module (https://github.com/ansible-collections/community.general/pull/4078).
- - yarn - fix incorrect handling of ``yarn list`` and ``yarn global list`` output
- that could result in fatal error (https://github.com/ansible-collections/community.general/pull/4050).
- - yarn - fix incorrectly reported status when installing a package globally
- (https://github.com/ansible-collections/community.general/issues/4045, https://github.com/ansible-collections/community.general/pull/4050).
- - yarn - fix missing ``~`` expansion in yarn global install folder which resulted
- in incorrect task status (https://github.com/ansible-collections/community.general/issues/4045,
- https://github.com/ansible-collections/community.general/pull/4048).
- deprecated_features:
- - mail callback plugin - not specifying ``sender`` is deprecated and will be
- disallowed in community.general 6.0.0 (https://github.com/ansible-collections/community.general/pull/4140).
- minor_changes:
- - cobbler inventory plugin - add ``include_profiles`` option (https://github.com/ansible-collections/community.general/pull/4068).
- - gitlab_project_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/issues/4038).
- - icinga2 inventory plugin - implemented constructed interface (https://github.com/ansible-collections/community.general/pull/4088).
- - linode inventory plugin - allow templating of ``access_token`` variable in
- Linode inventory plugin (https://github.com/ansible-collections/community.general/pull/4040).
- - lists_mergeby filter plugin - add parameters ``list_merge`` and ``recursive``.
- These are only supported when used with ansible-base 2.10 or ansible-core,
- but not with Ansible 2.9 (https://github.com/ansible-collections/community.general/pull/4058).
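- # Editor's sketch (not part of the upstream changelog): using the new
- # parameters; ``list1``/``list2`` are placeholder variables.
- #
- #   merged: "{{ list1 | community.general.lists_mergeby(list2, 'name', recursive=true, list_merge='append') }}"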
- - lxc_container - added ``wait_for_container`` parameter. If ``true`` the module
- will wait until the running task reports success as the status (https://github.com/ansible-collections/community.general/pull/4039).
- - mail callback plugin - add ``Message-ID`` and ``Date`` headers (https://github.com/ansible-collections/community.general/issues/4055,
- https://github.com/ansible-collections/community.general/pull/4056).
- - mail callback plugin - properly use Ansible's option handling to split lists
- (https://github.com/ansible-collections/community.general/pull/4140).
- - nmcli - adds ``routes6`` and ``route_metric6`` parameters for supporting IPv6
- routes (https://github.com/ansible-collections/community.general/issues/4059).
- - opennebula - add the release action for VMs in the ``HOLD`` state (https://github.com/ansible-collections/community.general/pull/4036).
- - opentelemetry_plugin - enrich the service when using the ``docker_login`` module (https://github.com/ansible-collections/community.general/pull/4104).
- - proxmox modules - move ``HAS_PROXMOXER`` check into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4030).
- - scaleway inventory plugin - add profile parameter ``scw_profile`` (https://github.com/ansible-collections/community.general/pull/4049).
- - snap - add option ``options``, permitting options to be set using the
- ``snap set`` command (https://github.com/ansible-collections/community.general/pull/3943).
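- # Editor's sketch (not part of the upstream changelog): setting a snap option;
- # snap name and option are placeholders, in the ``key=value`` form accepted by
- # ``snap set`` (format assumed from the module docs).
- #
- #   - community.general.snap:
- #       name: foo
- #       state: present
- #       options:
- #         - "bar=baz"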
- release_summary: Regular features and bugfixes release.
- fragments:
- - 3935-use-gitlab-instance-runner-to-create-runner.yml
- - 3943-add-option-options-to-snap-module.yml
- - 4.4.0.yml
- - 4026-fix-mail-callback.yml
- - 4030-proxmox-has-proxmoxer.yml
- - 4036-onevm-add-release-action.yaml
- - 4038-fix-and-rework-gitlb-project-variable.yml
- - 4039-cluster-container-wait.yml
- - 4040-linode-token-templating.yaml
- - 4043-fix-no-log-opentelemetry.yml
- - 4048-expand-tilde-in-yarn-global-install-folder.yaml
- - 4049-profile-for-scaleway-inventory.yml
- - 4050-properly-parse-json-lines-output-from-yarn.yaml
- - 4052-fix-detection-of-installed-cargo-packages-with-hyphens.yaml
- - 4056-add-missing-mail-headers.yml
- - 4058-lists_mergeby-add-parameters.yml
- - 4062-nmcli-ipv6-routes-support.yml
- - 4068-add-include_file-option.yml
- - 4078-python_requirements_info.yaml
- - 4088-add-constructed-interface-for-icinga2-inventory.yml
- - 4092-fix_local_ports_regex_listen_ports_facts.yaml
- - 4104-opentelemetry_plugin-enrich_docker_login.yaml
- - 4140-mail-callback-options.yml
- modules:
- - description: Manage user accounts with systemd-homed
- name: homectl
- namespace: system
- release_date: '2022-02-01'
- 4.5.0:
- changes:
- bugfixes:
- - dconf - skip processes that disappeared while we inspected them (https://github.com/ansible-collections/community.general/issues/4151).
- - gitlab_group_variable - add missing documentation about GitLab versions that
- support ``environment_scope`` and ``variable_type`` (https://github.com/ansible-collections/community.general/pull/4038).
- - 'gitlab_group_variable - allow setting the same variable name under different
- environment scopes. Due to this change, the return value ``group_variable`` differs
- from the previous version in check mode. It was counting ``updated`` values
- because it was accidentally overwriting environment scopes (https://github.com/ansible-collections/community.general/pull/4038).
-
- '
- - gitlab_group_variable - fix idempotent change behaviour for float and integer
- variables (https://github.com/ansible-collections/community.general/pull/4038).
- - gitlab_project_variable - ``value`` is not necessary when deleting variables
- (https://github.com/ansible-collections/community.general/pull/4150).
- - gitlab_runner - make ``project`` and ``owned`` mutually exclusive (https://github.com/ansible-collections/community.general/pull/4136).
- - homebrew_cask - fix force install operation (https://github.com/ansible-collections/community.general/issues/3703).
- - imc_rest - fixes the module failure due to the usage of ``itertools.izip_longest``
- which is not available in Python 3 (https://github.com/ansible-collections/community.general/issues/4206).
- - ini_file - when nothing is removed, do not report ``changed`` (https://github.com/ansible-collections/community.general/issues/4154).
- - keycloak_user_federation - creating a user federation while specifying an
- ID (that does not exist yet) no longer fails with a 404 Not Found (https://github.com/ansible-collections/community.general/pull/4212).
- - keycloak_user_federation - mappers auto-created by keycloak are matched and
- merged by their name and no longer create duplicated entries (https://github.com/ansible-collections/community.general/pull/4212).
- - mail callback plugin - fix encoding of the name of sender and recipient (https://github.com/ansible-collections/community.general/issues/4060,
- https://github.com/ansible-collections/community.general/pull/4061).
- - passwordstore lookup plugin - fix error detection for non-English locales
- (https://github.com/ansible-collections/community.general/pull/4219).
- - passwordstore lookup plugin - prevent returning path names as passwords by
- accident (https://github.com/ansible-collections/community.general/issues/4185,
- https://github.com/ansible-collections/community.general/pull/4192).
- - vdo - fix options error (https://github.com/ansible-collections/community.general/pull/4163).
- - yum_versionlock - fix matching of existing entries with names passed to the
- module. Match yum and dnf lock format (https://github.com/ansible-collections/community.general/pull/4183).
- minor_changes:
- - Avoid internal ansible-core module_utils in favor of equivalent public API
- available since at least Ansible 2.9. This fixes some instances added since
- the last time this was fixed (https://github.com/ansible-collections/community.general/pull/4232).
- - ansible_galaxy_install - added option ``no_deps`` to the module (https://github.com/ansible-collections/community.general/issues/4174).
- - gitlab_group_variable - new ``variables`` parameter (https://github.com/ansible-collections/community.general/pull/4038
- and https://github.com/ansible-collections/community.general/issues/4074).
- - keycloak_* modules - added connection timeout parameter when calling server
- (https://github.com/ansible-collections/community.general/pull/4168).
- - linode inventory plugin - add support for caching inventory results (https://github.com/ansible-collections/community.general/pull/4179).
- - opentelemetry_plugin - enrich service when using the ``jenkins``, ``hetzner``
- or ``jira`` modules (https://github.com/ansible-collections/community.general/pull/4105).
- - pacman - the module has been rewritten and is now much faster when using ``state=latest``.
- Operations are now done on all packages at once instead of package by package,
- and the configured output format of ``pacman`` no longer affects the module's
- operation. (https://github.com/ansible-collections/community.general/pull/3907,
- https://github.com/ansible-collections/community.general/issues/3783, https://github.com/ansible-collections/community.general/issues/4079)
- - passwordstore lookup plugin - add configurable ``lock`` and ``locktimeout``
- options to avoid race conditions in itself and in the ``pass`` utility it
- calls. By default, the plugin now locks on write operations (https://github.com/ansible-collections/community.general/pull/4194).
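- # Editor's sketch (not part of the upstream changelog): enabling locking in a
- # lookup; the ``lock`` and ``locktimeout`` values shown are assumptions, check
- # the plugin docs for accepted values.
- #
- #   pass: "{{ lookup('community.general.passwordstore', 'example/test create=true lock=readwrite locktimeout=45s') }}"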
- - proxmox modules - move common code into ``module_utils`` (https://github.com/ansible-collections/community.general/pull/4029).
- - proxmox_kvm - added EFI disk support when creating a VM with OVMF UEFI BIOS,
- using the new ``efidisk0`` option (https://github.com/ansible-collections/community.general/pull/4106,
- https://github.com/ansible-collections/community.general/issues/1638).
- - proxmox_kvm - add ``win11`` to ``ostype`` parameter for Windows 11 and Windows
- Server 2022 support (https://github.com/ansible-collections/community.general/issues/4023,
- https://github.com/ansible-collections/community.general/pull/4191).
- release_summary: Regular feature and bugfix release.
- fragments:
- - 3703-force-install-homebrew-cask.yml
- - 3907-pacman-speedup.yml
- - 3916-fix-vdo-options-type.yml
- - 4.5.0.yml
- - 4029-proxmox-refactor.yml
- - 4061-fix-mail-recipient-encoding.yml
- - 4086-rework_of_gitlab_proyect_variable_over_gitlab_group_variable.yml
- - 4105-opentelemetry_plugin-enrich_jira_hetzner_jenkins_services.yaml
- - 4106-proxmox-efidisk0-support.yaml
- - 4136-gitlab_runner-make-project-owned-mutually-exclusive.yml
- - 4150-gitlab-project-variable-absent-fix.yml
- - 4151-dconf-catch-psutil-nosuchprocess.yaml
- - 4154-ini_file_changed.yml
- - 4168-add-keycloak-url-timeout.yml
- - 4179-linode-inventory-cache.yaml
- - 4183-fix-yum_versionlock.yaml
- - 4191-proxmox-add-win11.yml
- - 4192-improve-passwordstore-consistency.yml
- - 4194-configurable-passwordstore-locking.yml
- - 4206-imc-rest-module.yaml
- - 4212-fixes-for-keycloak-user-federation.yml
- - 4219-passwordstore-locale-fix.yml
- - 4232-text-converter-import.yml
- - 4240-ansible_galaxy_install-no_deps.yml
- modules:
- - description: Configure Intel Optane Persistent Memory modules
- name: pmem
- namespace: storage.pmem
- - description: Scaleway private network management
- name: scaleway_private_network
- namespace: cloud.scaleway
- release_date: '2022-02-22'
- 4.6.0:
- changes:
- bugfixes:
- - filesize - add support for the busybox ``dd`` implementation, which is used
- by default on Alpine Linux (https://github.com/ansible-collections/community.general/pull/4288,
- https://github.com/ansible-collections/community.general/issues/4259).
- - linode inventory plugin - fix configuration handling relating to inventory
- filtering (https://github.com/ansible-collections/community.general/pull/4336).
- - mksysb - fixed a bug where parameter ``backup_dmapi_fs`` was passing the wrong
- CLI argument (https://github.com/ansible-collections/community.general/pull/3295).
- - pacman - Use ``--groups`` instead of ``--group`` (https://github.com/ansible-collections/community.general/pull/4312).
- - pacman - fix URL based package installation (https://github.com/ansible-collections/community.general/pull/4286,
- https://github.com/ansible-collections/community.general/issues/4285).
- - pacman - fix ``upgrade=yes`` (https://github.com/ansible-collections/community.general/pull/4275,
- https://github.com/ansible-collections/community.general/issues/4274).
- - pacman - make sure that ``packages`` is always returned when ``name`` or ``upgrade``
- is specified, even if nothing is done (https://github.com/ansible-collections/community.general/pull/4329).
- - pacman - when the ``update_cache`` option is combined with another option
- such as ``upgrade``, report ``changed`` based on the actions performed by
- the latter option. This was the behavior in community.general 4.4.0 and before.
- In community.general 4.5.0, a task combining these options would always report
- ``changed`` (https://github.com/ansible-collections/community.general/pull/4318).
- - proxmox inventory plugin - always convert strings that follow the ``key=value[,key=value[...]]``
- form into dictionaries (https://github.com/ansible-collections/community.general/pull/4349).
- - proxmox inventory plugin - fixed the ``description`` field being ignored if
- it contained a comma (https://github.com/ansible-collections/community.general/issues/4348).
- - proxmox_kvm - fix error in check when creating or cloning (https://github.com/ansible-collections/community.general/pull/4306).
- - proxmox_kvm - fix error when checking whether Proxmox VM exists (https://github.com/ansible-collections/community.general/pull/4287).
- - terraform - fix ``variable`` handling to allow complex values (https://github.com/ansible-collections/community.general/pull/4281).
- deprecated_features:
- - 'pacman - from community.general 5.0.0 on, the ``changed`` status of ``update_cache``
- will no longer be ignored if ``name`` or ``upgrade`` is specified. To keep
- the old behavior, add something like ``register: result`` and ``changed_when:
- result.packages | length > 0`` to your task (https://github.com/ansible-collections/community.general/pull/4329).'
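- # Editor's sketch (not part of the upstream changelog) of the workaround
- # suggested above; task name and module options are illustrative.
- #
- #   - name: Upgrade packages, keeping the pre-5.0.0 changed semantics
- #     community.general.pacman:
- #       update_cache: true
- #       upgrade: true
- #     register: result
- #     changed_when: result.packages | length > 0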
- known_issues:
- - pacman - ``update_cache`` cannot differentiate between up to date and outdated
- package lists and will report ``changed`` in both situations (https://github.com/ansible-collections/community.general/pull/4318).
- - pacman - binaries specified in the ``executable`` parameter must support ``--print-format``
- in order to be used by this module. In particular, AUR helper ``yay`` is known
- not to currently support it (https://github.com/ansible-collections/community.general/pull/4312).
- minor_changes:
- - jira - when creating a comment, ``fields`` is now used for additional data
- (https://github.com/ansible-collections/community.general/pull/4304).
- - ldap_entry - add support for recursive deletion (https://github.com/ansible-collections/community.general/issues/3613).
- - mksysb - revamped the module using ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/3295).
- - nmcli - add missing connection aliases ``802-3-ethernet`` and ``802-11-wireless``
- (https://github.com/ansible-collections/community.general/pull/4108).
- - nmcli - remove nmcli modify dependency on ``type`` parameter (https://github.com/ansible-collections/community.general/issues/2858).
- - npm - add ability to use ``production`` flag when ``ci`` is set (https://github.com/ansible-collections/community.general/pull/4299).
- - pacman - add ``remove_nosave`` parameter to avoid saving modified configuration
- files as ``.pacsave`` files. (https://github.com/ansible-collections/community.general/pull/4316,
- https://github.com/ansible-collections/community.general/issues/4315).
- - pacman - now implements proper change detection for ``update_cache=true``.
- Adds a ``cache_updated`` return value when ``update_cache=true`` to report
- this result independently of the module's overall changed return value (https://github.com/ansible-collections/community.general/pull/4337).
- - pipx - added options ``editable`` and ``pip_args`` (https://github.com/ansible-collections/community.general/issues/4300).
- - proxmox inventory plugin - add support for client-side jinja filters (https://github.com/ansible-collections/community.general/issues/3553).
- - redis - add authentication parameters ``login_user``, ``tls``, ``validate_certs``,
- and ``ca_certs`` (https://github.com/ansible-collections/community.general/pull/4207).
- - syslog_json - add option to skip logging of ``gather_facts`` playbook tasks;
- use v2 callback API (https://github.com/ansible-collections/community.general/pull/4223).
- - zypper - add support for ``--clean-deps`` option to remove packages that depend
- on a package being removed (https://github.com/ansible-collections/community.general/pull/4195).
- release_summary: Regular feature and bugfix release.
- fragments:
- - 3295-mksysb-revamp.yaml
- - 4.6.0.yml
- - 4108-nmcli-support-modifcation-without-type-param.yml
- - 4192-zypper-add-clean-deps.yml
- - 4207-add-redis-tls-support.yml
- - 4223-syslog-json-skip-syslog-option.yml
- - 4275-pacman-sysupgrade.yml
- - 4281-terraform-complex-variables.yml
- - 4286-pacman-url-pkgs.yml
- - 4287-fix-proxmox-vm-chek.yml
- - 4288-fix-4259-support-busybox-dd.yml
- - 4299-npm-add-production-with-ci-flag.yml
- - 4303-pipx-editable.yml
- - 4304-jira-fields-in-comment.yml
- - 4306-proxmox-fix-error-on-vm-clone.yml
- - 4312-pacman-groups.yml
- - 4316-pacman-remove-nosave.yml
- - 4318-pacman-restore-old-changed-behavior.yml
- - 4330-pacman-packages-update_cache.yml
- - 4336-linode-inventory-filtering.yaml
- - 4337-pacman-update_cache.yml
- - 4349-proxmox-inventory-dict-facts.yml
- - 4352-proxmox-inventory-filters.yml
- - 4355-ldap-recursive-delete.yml
- release_date: '2022-03-15'
- 4.6.1:
- changes:
- bugfixes:
- - 'lxd inventory plugin - do not crash if OS and release metadata are not present
-
- (https://github.com/ansible-collections/community.general/pull/4351).
-
- '
- - terraform - revert bugfix https://github.com/ansible-collections/community.general/pull/4281
- that tried to fix ``variable`` handling to allow complex values. It turned
- out that this was breaking several valid use-cases (https://github.com/ansible-collections/community.general/issues/4367,
- https://github.com/ansible-collections/community.general/pull/4370).
- release_summary: Extraordinary bugfix release to fix a breaking change in ``terraform``.
- fragments:
- - 4.6.1.yml
- - 4351-inventory-lxd-handling_metadata_wo_os_and_release.yml
- - 4368-reverts-4281.yml
- release_date: '2022-03-16'
diff --git a/ansible_collections/community/general/changelogs/config.yaml b/ansible_collections/community/general/changelogs/config.yaml
deleted file mode 100644
index fd0b422a..00000000
--- a/ansible_collections/community/general/changelogs/config.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-changelog_filename_template: ../CHANGELOG.rst
-changelog_filename_version_depth: 0
-changes_file: changelog.yaml
-changes_format: combined
-keep_fragments: false
-mention_ancestor: true
-flatmap: true
-new_plugins_after_name: removed_features
-notesdir: fragments
-prelude_section_name: release_summary
-prelude_section_title: Release Summary
-sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
-title: Community General
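This configuration drives antsibull-changelog: the release notes above are assembled from YAML fragments stored in the ``fragments`` directory (``notesdir``), keyed by the section names listed here. A minimal sketch of such a fragment (module name and wording are illustrative):

    minor_changes:
      - some_module - add support for the ``example`` option.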
diff --git a/ansible_collections/community/general/docs/docsite/extra-docs.yml b/ansible_collections/community/general/docs/docsite/extra-docs.yml
deleted file mode 100644
index 83f533ec..00000000
--- a/ansible_collections/community/general/docs/docsite/extra-docs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-sections:
- - title: Guides
- toctree:
- - filter_guide
- - test_guide
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml
deleted file mode 100644
index 69227fbe..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-list1:
- - name: foo
- extra: true
- - name: bar
- extra: false
- - name: meh
- extra: true
-
-list2:
- - name: foo
- path: /foo
- - name: baz
- path: /baz
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
deleted file mode 100644
index 7d8a7cf6..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-list1:
- - name: myname01
- param01:
- x: default_value
- y: default_value
- list:
- - default_value
- - name: myname02
- param01: [1, 1, 2, 3]
-
-list2:
- - name: myname01
- param01:
- y: patch_value
- z: patch_value
- list:
- - patch_value
- - name: myname02
- param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml
deleted file mode 100644
index d1cbb4b3..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 1. Merge two lists by common attribute 'name'
- include_vars:
- dir: example-001_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-001.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
deleted file mode 120000
index 7ea8984a..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-common.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml
deleted file mode 100644
index 4ecfb0a6..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-list3: "{{ list1|
- community.general.lists_mergeby(list2, 'name') }}"
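For orientation, under the documented semantics of ``lists_mergeby`` (dictionaries merged by the given attribute, result sorted by that attribute), example-001 should produce roughly:

    list3:
      - extra: false
        name: bar
      - name: baz
        path: /baz
      - extra: true
        name: foo
        path: /foo
      - extra: true
        name: meh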
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml
deleted file mode 100644
index d21441a8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 2. Merge two lists by common attribute 'name'
- include_vars:
- dir: example-002_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-002.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
deleted file mode 120000
index 7ea8984a..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-common.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml
deleted file mode 100644
index 9eb6775f..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml
deleted file mode 100644
index 76922786..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 3. Merge recursive by 'name', replace lists (default)
- include_vars:
- dir: example-003_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-003.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml
deleted file mode 100644
index 6d6bf8a4..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true) }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml
deleted file mode 100644
index 8a473a73..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 4. Merge recursive by 'name', keep lists
- include_vars:
- dir: example-004_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-004.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
deleted file mode 100644
index a525ae4f..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='keep') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml
deleted file mode 100644
index 8bdf92c3..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 5. Merge recursive by 'name', append lists
- include_vars:
- dir: example-005_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-005.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
deleted file mode 100644
index 65068610..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='append') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml
deleted file mode 100644
index 9dcb9b68..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 6. Merge recursive by 'name', prepend lists
- include_vars:
- dir: example-006_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-006.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
deleted file mode 100644
index d880dfa9..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='prepend') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml
deleted file mode 100644
index e1a6f2c7..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 7. Merge recursive by 'name', append lists 'remove present'
- include_vars:
- dir: example-007_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-007.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
deleted file mode 100644
index af71d6df..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='append_rp') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml
deleted file mode 100644
index 18a59886..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: 8. Merge recursive by 'name', prepend lists 'remove present'
- include_vars:
- dir: example-008_vars
-- debug:
- var: list3
- when: debug|d(false)|bool
-- template:
- src: list3.out.j2
- dest: example-008.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
deleted file mode 120000
index 299736f8..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
+++ /dev/null
@@ -1 +0,0 @@
-../default-recursive-true.yml
\ No newline at end of file
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
deleted file mode 100644
index 8a205785..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-list3: "{{ [list1, list2]|
- community.general.lists_mergeby('name',
- recursive=true,
- list_merge='prepend_rp') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
deleted file mode 100644
index 014ff2d1..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-{% for i in examples %}
-{{ i.label }}
-
-.. code-block:: {{ i.lang }}
-
- {{ lookup('file', i.file)|indent(2) }}
-
-{% endfor %}
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2
deleted file mode 100644
index 764ce3bd..00000000
--- a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-list3:
-{{ list3|to_nice_yaml(indent=0) }}
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst
deleted file mode 100644
index bab223d3..00000000
--- a/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-
-.. _ansible_collections.community.general.docsite.filter_guide:
-
-community.general Filter Guide
-==============================
-
-The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.
-
-.. toctree::
- :maxdepth: 2
-
- filter_guide_paths
- filter_guide_abstract_informations
- filter_guide_working_with_times
- filter_guide_working_with_versions
- filter_guide_creating_identifiers
- filter_guide_conversions
- filter_guide_selecting_json_data
- filter_guide_working_with_unicode
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst
deleted file mode 100644
index 04fb49bd..00000000
--- a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Abstract transformations
-------------------------
-
-.. toctree::
- :maxdepth: 1
-
- filter_guide_abstract_informations_dictionaries
- filter_guide_abstract_informations_grouping
- filter_guide_abstract_informations_merging_lists_of_dictionaries
- filter_guide_abstract_informations_counting_elements_in_sequence
diff --git a/ansible_collections/community/general/meta/runtime.yml b/ansible_collections/community/general/meta/runtime.yml
deleted file mode 100644
index f5931666..00000000
--- a/ansible_collections/community/general/meta/runtime.yml
+++ /dev/null
@@ -1,629 +0,0 @@
----
-requires_ansible: '>=2.9.10'
-plugin_routing:
- connection:
- docker:
- redirect: community.docker.docker
- oc:
- redirect: community.okd.oc
- lookup:
- gcp_storage_file:
- redirect: community.google.gcp_storage_file
- hashi_vault:
- redirect: community.hashi_vault.hashi_vault
- nios:
- redirect: infoblox.nios_modules.nios_lookup
- nios_next_ip:
- redirect: infoblox.nios_modules.nios_next_ip
- nios_next_network:
- redirect: infoblox.nios_modules.nios_next_network
- modules:
- ali_instance_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.ali_instance_info instead.
- docker_compose:
- redirect: community.docker.docker_compose
- docker_config:
- redirect: community.docker.docker_config
- docker_container:
- redirect: community.docker.docker_container
- docker_container_info:
- redirect: community.docker.docker_container_info
- docker_host_info:
- redirect: community.docker.docker_host_info
- docker_image:
- redirect: community.docker.docker_image
- docker_image_facts:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use community.docker.docker_image_info instead.
- docker_image_info:
- redirect: community.docker.docker_image_info
- docker_login:
- redirect: community.docker.docker_login
- docker_network:
- redirect: community.docker.docker_network
- docker_network_info:
- redirect: community.docker.docker_network_info
- docker_node:
- redirect: community.docker.docker_node
- docker_node_info:
- redirect: community.docker.docker_node_info
- docker_prune:
- redirect: community.docker.docker_prune
- docker_secret:
- redirect: community.docker.docker_secret
- docker_service:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use community.docker.docker_compose instead.
- docker_stack:
- redirect: community.docker.docker_stack
- docker_stack_info:
- redirect: community.docker.docker_stack_info
- docker_stack_task_info:
- redirect: community.docker.docker_stack_task_info
- docker_swarm:
- redirect: community.docker.docker_swarm
- docker_swarm_info:
- redirect: community.docker.docker_swarm_info
- docker_swarm_service:
- redirect: community.docker.docker_swarm_service
- docker_swarm_service_info:
- redirect: community.docker.docker_swarm_service_info
- docker_volume:
- redirect: community.docker.docker_volume
- docker_volume_info:
- redirect: community.docker.docker_volume_info
- foreman:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the modules from the theforeman.foreman collection instead.
- gc_storage:
- redirect: community.google.gc_storage
- gcdns_record:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_dns_resource_record_set instead.
- gcdns_zone:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_dns_managed_zone instead.
- gce:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_instance instead.
- gce_eip:
- redirect: community.google.gce_eip
- gce_img:
- redirect: community.google.gce_img
- gce_instance_template:
- redirect: community.google.gce_instance_template
- gce_labels:
- redirect: community.google.gce_labels
- gce_lb:
- redirect: community.google.gce_lb
- gce_mig:
- redirect: community.google.gce_mig
- gce_net:
- redirect: community.google.gce_net
- gce_pd:
- redirect: community.google.gce_pd
- gce_snapshot:
- redirect: community.google.gce_snapshot
- gce_tag:
- redirect: community.google.gce_tag
- gcp_backend_service:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_backend_service instead.
- gcp_forwarding_rule:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule
- instead.
- gcp_healthcheck:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check
- or google.cloud.gcp_compute_https_health_check instead.
- gcp_target_proxy:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_target_http_proxy instead.
- gcp_url_map:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_compute_url_map instead.
- gcpubsub:
- redirect: community.google.gcpubsub
- gcpubsub_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.google.gcpubsub_info instead.
- gcpubsub_info:
- redirect: community.google.gcpubsub_info
- gcspanner:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance
- instead.
- github_hooks:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use community.general.github_webhook and community.general.github_webhook_info
- instead.
- hetzner_failover_ip:
- redirect: community.hrobot.failover_ip
- hetzner_failover_ip_info:
- redirect: community.hrobot.failover_ip_info
- hetzner_firewall:
- redirect: community.hrobot.firewall
- hetzner_firewall_info:
- redirect: community.hrobot.firewall_info
- hpilo_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.hpilo_info instead.
- idrac_firmware:
- redirect: dellemc.openmanage.idrac_firmware
- idrac_redfish_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.idrac_redfish_info instead.
- idrac_server_config_profile:
- redirect: dellemc.openmanage.idrac_server_config_profile
- jenkins_job_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.jenkins_job_info instead.
- katello:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the modules from the theforeman.foreman collection instead.
- kubevirt_cdi_upload:
- redirect: community.kubevirt.kubevirt_cdi_upload
- kubevirt_preset:
- redirect: community.kubevirt.kubevirt_preset
- kubevirt_pvc:
- redirect: community.kubevirt.kubevirt_pvc
- kubevirt_rs:
- redirect: community.kubevirt.kubevirt_rs
- kubevirt_template:
- redirect: community.kubevirt.kubevirt_template
- kubevirt_vm:
- redirect: community.kubevirt.kubevirt_vm
- ldap_attr:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.ldap_attrs instead.
- logicmonitor:
- tombstone:
- removal_version: 1.0.0
- warning_text: The logicmonitor_facts module is no longer maintained and the
- API used has been disabled in 2017.
- logicmonitor_facts:
- tombstone:
- removal_version: 1.0.0
- warning_text: The logicmonitor_facts module is no longer maintained and the
- API used has been disabled in 2017.
- memset_memstore_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.memset_memstore_info instead.
- memset_server_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.memset_server_info instead.
- na_cdot_aggregate:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_aggregate instead.
- na_cdot_license:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_license instead.
- na_cdot_lun:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_lun instead.
- na_cdot_qtree:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_qtree instead.
- na_cdot_svm:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_svm instead.
- na_cdot_user:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_user instead.
- na_cdot_user_role:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_user_role instead.
- na_cdot_volume:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.ontap.na_ontap_volume instead.
- na_ontap_gather_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use netapp.ontap.na_ontap_info instead.
- nginx_status_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.nginx_status_info instead.
- nios_a_record:
- redirect: infoblox.nios_modules.nios_a_record
- nios_aaaa_record:
- redirect: infoblox.nios_modules.nios_aaaa_record
- nios_cname_record:
- redirect: infoblox.nios_modules.nios_cname_record
- nios_dns_view:
- redirect: infoblox.nios_modules.nios_dns_view
- nios_fixed_address:
- redirect: infoblox.nios_modules.nios_fixed_address
- nios_host_record:
- redirect: infoblox.nios_modules.nios_host_record
- nios_member:
- redirect: infoblox.nios_modules.nios_member
- nios_mx_record:
- redirect: infoblox.nios_modules.nios_mx_record
- nios_naptr_record:
- redirect: infoblox.nios_modules.nios_naptr_record
- nios_network:
- redirect: infoblox.nios_modules.nios_network
- nios_network_view:
- redirect: infoblox.nios_modules.nios_network_view
- nios_nsgroup:
- redirect: infoblox.nios_modules.nios_nsgroup
- nios_ptr_record:
- redirect: infoblox.nios_modules.nios_ptr_record
- nios_srv_record:
- redirect: infoblox.nios_modules.nios_srv_record
- nios_txt_record:
- redirect: infoblox.nios_modules.nios_txt_record
- nios_zone:
- redirect: infoblox.nios_modules.nios_zone
- ome_device_info:
- redirect: dellemc.openmanage.ome_device_info
- one_image_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.one_image_info instead.
- onepassword_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.onepassword_info instead.
- oneview_datacenter_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_datacenter_info instead.
- oneview_enclosure_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_enclosure_info instead.
- oneview_ethernet_network_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_ethernet_network_info instead.
- oneview_fc_network_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_fc_network_info instead.
- oneview_fcoe_network_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_fcoe_network_info instead.
- oneview_logical_interconnect_group_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_logical_interconnect_group_info
- instead.
- oneview_network_set_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_network_set_info instead.
- oneview_san_manager_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.oneview_san_manager_info instead.
- online_server_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.online_server_info instead.
- online_user_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.online_user_info instead.
- ovirt:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_vm instead.
- ovirt_affinity_label_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead.
- ovirt_api_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_api_info instead.
- ovirt_cluster_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_cluster_info instead.
- ovirt_datacenter_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead.
- ovirt_disk_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_disk_info instead.
- ovirt_event_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_event_info instead.
- ovirt_external_provider_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead.
- ovirt_group_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_group_info instead.
- ovirt_host_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_host_info instead.
- ovirt_host_storage_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead.
- ovirt_network_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_network_info instead.
- ovirt_nic_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_nic_info instead.
- ovirt_permission_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_permission_info instead.
- ovirt_quota_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_quota_info instead.
- ovirt_scheduling_policy_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead.
- ovirt_snapshot_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead.
- ovirt_storage_domain_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead.
- ovirt_storage_template_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead.
- ovirt_storage_vm_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead.
- ovirt_tag_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_tag_info instead.
- ovirt_template_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_template_info instead.
- ovirt_user_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_user_info instead.
- ovirt_vm_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_vm_info instead.
- ovirt_vmpool_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
- postgresql_copy:
- redirect: community.postgresql.postgresql_copy
- postgresql_db:
- redirect: community.postgresql.postgresql_db
- postgresql_ext:
- redirect: community.postgresql.postgresql_ext
- postgresql_idx:
- redirect: community.postgresql.postgresql_idx
- postgresql_info:
- redirect: community.postgresql.postgresql_info
- postgresql_lang:
- redirect: community.postgresql.postgresql_lang
- postgresql_membership:
- redirect: community.postgresql.postgresql_membership
- postgresql_owner:
- redirect: community.postgresql.postgresql_owner
- postgresql_pg_hba:
- redirect: community.postgresql.postgresql_pg_hba
- postgresql_ping:
- redirect: community.postgresql.postgresql_ping
- postgresql_privs:
- redirect: community.postgresql.postgresql_privs
- postgresql_publication:
- redirect: community.postgresql.postgresql_publication
- postgresql_query:
- redirect: community.postgresql.postgresql_query
- postgresql_schema:
- redirect: community.postgresql.postgresql_schema
- postgresql_sequence:
- redirect: community.postgresql.postgresql_sequence
- postgresql_set:
- redirect: community.postgresql.postgresql_set
- postgresql_slot:
- redirect: community.postgresql.postgresql_slot
- postgresql_subscription:
- redirect: community.postgresql.postgresql_subscription
- postgresql_table:
- redirect: community.postgresql.postgresql_table
- postgresql_tablespace:
- redirect: community.postgresql.postgresql_tablespace
- postgresql_user:
- redirect: community.postgresql.postgresql_user
- postgresql_user_obj_stat_info:
- redirect: community.postgresql.postgresql_user_obj_stat_info
- purefa_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use purestorage.flasharray.purefa_info instead.
- purefb_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use purestorage.flashblade.purefb_info instead.
- python_requirements_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.python_requirements_info instead.
- redfish_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.redfish_info instead.
- scaleway_image_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_image_info instead.
- scaleway_ip_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_ip_info instead.
- scaleway_organization_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_organization_info instead.
- scaleway_security_group_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_security_group_info instead.
- scaleway_server_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_server_info instead.
- scaleway_snapshot_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_snapshot_info instead.
- scaleway_volume_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.scaleway_volume_info instead.
- sf_account_manager:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.elementsw.na_elementsw_account instead.
- sf_check_connections:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.elementsw.na_elementsw_check_connections instead.
- sf_snapshot_schedule_manager:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead.
- sf_volume_access_group_manager:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.elementsw.na_elementsw_access_group instead.
- sf_volume_manager:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use netapp.elementsw.na_elementsw_volume instead.
- smartos_image_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.smartos_image_info instead.
- vertica_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.vertica_info instead.
- xenserver_guest_facts:
- tombstone:
- removal_version: 3.0.0
- warning_text: Use community.general.xenserver_guest_info instead.
- doc_fragments:
- _gcp:
- redirect: community.google._gcp
- docker:
- redirect: community.docker.docker
- hetzner:
- redirect: community.hrobot.robot
- kubevirt_common_options:
- redirect: community.kubevirt.kubevirt_common_options
- kubevirt_vm_options:
- redirect: community.kubevirt.kubevirt_vm_options
- nios:
- redirect: infoblox.nios_modules.nios
- postgresql:
- redirect: community.postgresql.postgresql
- module_utils:
- docker.common:
- redirect: community.docker.common
- docker.swarm:
- redirect: community.docker.swarm
- gcdns:
- redirect: community.google.gcdns
- gce:
- redirect: community.google.gce
- gcp:
- redirect: community.google.gcp
- hetzner:
- redirect: community.hrobot.robot
- kubevirt:
- redirect: community.kubevirt.kubevirt
- net_tools.nios.api:
- redirect: infoblox.nios_modules.api
- postgresql:
- redirect: community.postgresql.postgresql
- remote_management.dellemc.dellemc_idrac:
- redirect: dellemc.openmanage.dellemc_idrac
- remote_management.dellemc.ome:
- redirect: dellemc.openmanage.ome
- callback:
- actionable:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
- = no' and 'display_ok_hosts = no' options.
- full_skip:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
- = no' option.
- stderr:
- tombstone:
- removal_version: 2.0.0
- warning_text: Use the 'default' callback plugin with 'display_failed_stderr
- = yes' option.
- inventory:
- docker_machine:
- redirect: community.docker.docker_machine
- docker_swarm:
- redirect: community.docker.docker_swarm
- kubevirt:
- redirect: community.kubevirt.kubevirt
- filter:
- path_join:
- # The ansible.builtin.path_join filter has been added in ansible-base 2.10.
- # Since plugin routing is only available since ansible-base 2.10, this
- # redirect will be used for ansible-base 2.10 or later, and the included
- # path_join filter will be used for Ansible 2.9 or earlier.
- redirect: ansible.builtin.path_join
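The practical effect of the ``redirect`` entries above is that playbooks written against legacy names keep working: Ansible resolves them to the new collection transparently, while tombstoned names fail with the configured ``warning_text``. For example (illustrative task):

    - name: Legacy module name, transparently routed to community.docker.docker_container
      community.general.docker_container:
        name: web
        image: nginx
        state: started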
diff --git a/ansible_collections/community/general/plugins/action/iptables_state.py b/ansible_collections/community/general/plugins/action/iptables_state.py
deleted file mode 120000
index 864608d5..00000000
--- a/ansible_collections/community/general/plugins/action/iptables_state.py
+++ /dev/null
@@ -1 +0,0 @@
-system/iptables_state.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/action/shutdown.py b/ansible_collections/community/general/plugins/action/shutdown.py
deleted file mode 120000
index 503b1ec0..00000000
--- a/ansible_collections/community/general/plugins/action/shutdown.py
+++ /dev/null
@@ -1 +0,0 @@
-system/shutdown.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/action/system/iptables_state.py b/ansible_collections/community/general/plugins/action/system/iptables_state.py
deleted file mode 100644
index b8ae1a5d..00000000
--- a/ansible_collections/community/general/plugins/action/system/iptables_state.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2020, quidame
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import time
-
-from ansible.plugins.action import ActionBase
-from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure
-from ansible.utils.vars import merge_hash
-from ansible.utils.display import Display
-
-display = Display()
-
-
-class ActionModule(ActionBase):
-
- # Keep internal params away from user interactions
- _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
- DEFAULT_SUDOABLE = True
-
- MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
- "This module doesn't support async>0 and poll>0 when its 'state' param "
- "is set to 'restored'. To enable its rollback feature (that needs the "
- "module to run asynchronously on the remote), please set task attribute "
- "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
- MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
- "Attempts to restore iptables state without rollback in case of mistake "
- "may lead the ansible controller to loose access to the hosts and never "
- "regain it before fixing firewall rules through a serial console, or any "
- "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
- "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
- "(recommended).")
- MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
- "You attempt to restore iptables state with rollback in case of mistake, "
- "but with settings that will lead this rollback to happen AFTER that the "
- "controller will reach its own timeout. Please set task attribute 'poll' "
- "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
- "'ansible_timeout' (=%s) (recommended).")
-
- def _async_result(self, async_status_args, task_vars, timeout):
- '''
- Retrieve results of the asynchronous task, and display them in place of
- the async wrapper results (those with the ansible_job_id key).
- '''
- async_status = self._task.copy()
- async_status.args = async_status_args
- async_status.action = 'ansible.builtin.async_status'
- async_status.async_val = 0
- async_action = self._shared_loader_obj.action_loader.get(
- async_status.action, task=async_status, connection=self._connection,
- play_context=self._play_context, loader=self._loader, templar=self._templar,
- shared_loader_obj=self._shared_loader_obj)
-
- if async_status.args['mode'] == 'cleanup':
- return async_action.run(task_vars=task_vars)
-
- # At least one iteration is required, even if timeout is 0.
- for dummy in range(max(1, timeout)):
- async_result = async_action.run(task_vars=task_vars)
- if async_result.get('finished', 0) == 1:
- break
- time.sleep(min(1, timeout))
-
- return async_result
-
- def run(self, tmp=None, task_vars=None):
-
- self._supports_check_mode = True
- self._supports_async = True
-
- result = super(ActionModule, self).run(tmp, task_vars)
- del tmp # tmp no longer has any effect
-
- if not result.get('skipped'):
-
- # FUTURE: better to let _execute_module calculate this internally?
- wrap_async = self._task.async_val and not self._connection.has_native_async
-
- # Set short names for values we'll have to compare or reuse
- task_poll = self._task.poll
- task_async = self._task.async_val
- check_mode = self._play_context.check_mode
- max_timeout = self._connection._play_context.timeout
- module_args = self._task.args
-
- if module_args.get('state', None) == 'restored':
- if not wrap_async:
- if not check_mode:
- display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
- task_poll,
- task_async,
- max_timeout))
- elif task_poll:
- raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
- task_poll,
- task_async,
- max_timeout))
- else:
- if task_async > max_timeout and not check_mode:
- display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
- task_poll,
- task_async,
- max_timeout))
-
- # inject the async directory based on the shell option into the
- # module args
- async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
-
- # Bind the loop max duration to consistent values on both
- # remote and local sides (if not the same, make the loop
- # longer on the controller); and set a backup file path.
- module_args['_timeout'] = task_async
- module_args['_back'] = '%s/iptables.state' % async_dir
- async_status_args = dict(mode='status')
- confirm_cmd = 'rm -f %s' % module_args['_back']
- starter_cmd = 'touch %s.starter' % module_args['_back']
- remaining_time = max(task_async, max_timeout)
-
- # do work!
- result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
-
- # Then the 3-steps "go ahead or rollback":
- # 1. Catch early errors of the module (in asynchronous task) if any.
- # Touch a file on the target to signal the module to process now.
- # 2. Reset connection to ensure a persistent one will not be reused.
- # 3. Confirm the restored state by removing the backup on the remote.
- # Retrieve the results of the asynchronous task to return them.
- if '_back' in module_args:
- async_status_args['jid'] = result.get('ansible_job_id', None)
- if async_status_args['jid'] is None:
- raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
-
- # Catch early errors due to missing mandatory option, bad
- # option type/value, missing required system command, etc.
- result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
-
- # The module knows not to process the main iptables-restore
- # command before finding (and deleting) the 'starter' cookie on
- # the host, so the previous query will not hit the ssh timeout.
- dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
-
- # As the main command is not yet executed on the target, here
- # 'finished' means 'failed before the main command was executed'.
- if not result['finished']:
- try:
- self._connection.reset()
- except AttributeError:
- pass
-
- for dummy in range(max_timeout):
- time.sleep(1)
- remaining_time -= 1
- # - AnsibleConnectionFailure covers rejected requests (i.e.
- # by rules with '--jump REJECT')
- # - ansible_timeout is able to cover dropped requests (due
- # to a rule or policy DROP) if not lower than async_val.
- try:
- dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
- break
- except AnsibleConnectionFailure:
- continue
-
- result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
-
- # Cleanup async related stuff and internal params
- for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
- if result.get(key):
- del result[key]
-
- if result.get('invocation', {}).get('module_args'):
- for key in ('_back', '_timeout', '_async_dir', 'jid'):
- if result['invocation']['module_args'].get(key):
- del result['invocation']['module_args'][key]
-
- async_status_args['mode'] = 'cleanup'
- dummy = self._async_result(async_status_args, task_vars, 0)
-
- if not wrap_async:
- # remove a temporary path we created
- self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
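As the warning strings above spell out, the rollback machinery only engages when the module runs asynchronously with ``poll: 0``. A minimal sketch of the intended invocation (path and timeout are illustrative):

    - name: Restore iptables state, rolling back automatically if connectivity is lost
      community.general.iptables_state:
        state: restored
        path: /etc/iptables/rules.v4
      async: 30   # > 2 and not greater than ansible_timeout
      poll: 0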
diff --git a/ansible_collections/community/general/plugins/action/system/shutdown.py b/ansible_collections/community/general/plugins/action/system/shutdown.py
deleted file mode 100644
index 19813b08..00000000
--- a/ansible_collections/community/general/plugins/action/system/shutdown.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2020, Amin Vakil
-# Copyright: (c) 2016-2018, Matt Davis
-# Copyright: (c) 2018, Sam Doran
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError, AnsibleConnectionFailure
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.common.collections import is_string
-from ansible.plugins.action import ActionBase
-from ansible.utils.display import Display
-
-display = Display()
-
-
-class TimedOutException(Exception):
- pass
-
-
-class ActionModule(ActionBase):
- TRANSFERS_FILES = False
- _VALID_ARGS = frozenset((
- 'msg',
- 'delay',
- 'search_paths'
- ))
-
- DEFAULT_CONNECT_TIMEOUT = None
- DEFAULT_PRE_SHUTDOWN_DELAY = 0
- DEFAULT_SHUTDOWN_MESSAGE = 'Shut down initiated by Ansible'
- DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
- DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
- DEFAULT_SUDOABLE = True
-
- SHUTDOWN_COMMANDS = {
- 'alpine': 'poweroff',
- 'vmkernel': 'halt',
- }
-
- SHUTDOWN_COMMAND_ARGS = {
- 'alpine': '',
- 'void': '-h +{delay_min} "{message}"',
- 'freebsd': '-h +{delay_sec}s "{message}"',
- 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
- 'macosx': '-h +{delay_min} "{message}"',
- 'openbsd': '-h +{delay_min} "{message}"',
- 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
- 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
- 'vmkernel': '-d {delay_sec}',
- 'aix': '-Fh',
- }
-
- def __init__(self, *args, **kwargs):
- super(ActionModule, self).__init__(*args, **kwargs)
-
- @property
- def delay(self):
- return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
-
- def _check_delay(self, key, default):
- """Ensure that the value is positive or zero"""
- value = int(self._task.args.get(key, default))
- if value < 0:
- value = 0
- return value
-
- def _get_value_from_facts(self, variable_name, distribution, default_value):
- """Get dist+version specific args first, then distribution, then family, lastly use default"""
- attr = getattr(self, variable_name)
- value = attr.get(
- distribution['name'] + distribution['version'],
- attr.get(
- distribution['name'],
- attr.get(
- distribution['family'],
- getattr(self, default_value))))
- return value
-
- def get_shutdown_command_args(self, distribution):
- args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
- # Convert seconds to minutes; anything under 60 seconds becomes 0 minutes.
- delay_sec = self.delay
- shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
- return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
-
- def get_distribution(self, task_vars):
- # FIXME: only execute the module if we don't already have the facts we need
- distribution = {}
- display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
- module_output = self._execute_module(
- task_vars=task_vars,
- module_name='ansible.legacy.setup',
- module_args={'gather_subset': 'min'})
- try:
- if module_output.get('failed', False):
- raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
- to_native(module_output['module_stdout']).strip(),
- to_native(module_output['module_stderr']).strip()))
- distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
- distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
- distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
- display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
- return distribution
- except KeyError as ke:
- raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
-
- def get_shutdown_command(self, task_vars, distribution):
- shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
- default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- search_paths = self._task.args.get('search_paths', default_search_paths)
-
- # FIXME: switch all this to user arg spec validation methods when they are available
- # Convert bare strings to a list
- if is_string(search_paths):
- search_paths = [search_paths]
-
- # Error if we didn't get a list
- err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
- try:
- incorrect_type = any(not is_string(x) for x in search_paths)
- if not isinstance(search_paths, list) or incorrect_type:
- raise TypeError
- except TypeError:
- raise AnsibleError(err_msg.format(search_paths))
-
- display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
- action=self._task.action,
- command=shutdown_bin,
- paths=search_paths))
- find_result = self._execute_module(
- task_vars=task_vars,
- # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
- module_name='ansible.legacy.find',
- module_args={
- 'paths': search_paths,
- 'patterns': [shutdown_bin],
- 'file_type': 'any'
- }
- )
-
- full_path = [x['path'] for x in find_result['files']]
- if not full_path:
- raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
- self._shutdown_command = full_path[0]
- return self._shutdown_command
-
- def perform_shutdown(self, task_vars, distribution):
- result = {}
- shutdown_result = {}
- shutdown_command = self.get_shutdown_command(task_vars, distribution)
- shutdown_command_args = self.get_shutdown_command_args(distribution)
- shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
-
- self.cleanup(force=True)
- try:
- display.vvv("{action}: shutting down server...".format(action=self._task.action))
- display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
- if self._play_context.check_mode:
- shutdown_result['rc'] = 0
- else:
- shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
- except AnsibleConnectionFailure as e:
- # If the connection is closed too quickly due to the system being shut down, carry on
- display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
- shutdown_result['rc'] = 0
-
- if shutdown_result['rc'] != 0:
- result['failed'] = True
- result['shutdown'] = False
- result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
- stdout=to_native(shutdown_result['stdout'].strip()),
- stderr=to_native(shutdown_result['stderr'].strip()))
- return result
-
- result['failed'] = False
- result['shutdown_command'] = shutdown_command_exec
- return result
-
- def run(self, tmp=None, task_vars=None):
- self._supports_check_mode = True
- self._supports_async = True
-
- # If running with a local connection, fail so we don't shut down ourselves
- if self._connection.transport == 'local' and (not self._play_context.check_mode):
- msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
- return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
-
- if task_vars is None:
- task_vars = {}
-
- result = super(ActionModule, self).run(tmp, task_vars)
-
- if result.get('skipped', False) or result.get('failed', False):
- return result
-
- distribution = self.get_distribution(task_vars)
-
- # Initiate shutdown
- shutdown_result = self.perform_shutdown(task_vars, distribution)
-
- if shutdown_result['failed']:
- result = shutdown_result
- return result
-
- result['shutdown'] = True
- result['changed'] = True
- result['shutdown_command'] = shutdown_result['shutdown_command']
-
- return result
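Note that ``get_shutdown_command_args`` converts the ``delay`` given in seconds into the minutes most ``shutdown`` binaries expect. A minimal usage sketch (values illustrative):

    - name: Shut down the host after two minutes
      community.general.shutdown:
        delay: 120
        msg: Maintenance window

On a Linux target this renders the command as ``shutdown -h 2 "Maintenance window"``.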
diff --git a/ansible_collections/community/general/plugins/become/machinectl.py b/ansible_collections/community/general/plugins/become/machinectl.py
deleted file mode 100644
index aebb0891..00000000
--- a/ansible_collections/community/general/plugins/become/machinectl.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: machinectl
- short_description: Systemd's machinectl privilege escalation
- description:
- - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
- author: Ansible Core Team
- options:
- become_user:
- description: User you 'become' to execute the task
- default: ''
- ini:
- - section: privilege_escalation
- key: become_user
- - section: machinectl_become_plugin
- key: user
- vars:
- - name: ansible_become_user
- - name: ansible_machinectl_user
- env:
- - name: ANSIBLE_BECOME_USER
- - name: ANSIBLE_MACHINECTL_USER
- become_exe:
- description: Machinectl executable
- default: machinectl
- ini:
- - section: privilege_escalation
- key: become_exe
- - section: machinectl_become_plugin
- key: executable
- vars:
- - name: ansible_become_exe
- - name: ansible_machinectl_exe
- env:
- - name: ANSIBLE_BECOME_EXE
- - name: ANSIBLE_MACHINECTL_EXE
- become_flags:
- description: Options to pass to machinectl
- default: ''
- ini:
- - section: privilege_escalation
- key: become_flags
- - section: machinectl_become_plugin
- key: flags
- vars:
- - name: ansible_become_flags
- - name: ansible_machinectl_flags
- env:
- - name: ANSIBLE_BECOME_FLAGS
- - name: ANSIBLE_MACHINECTL_FLAGS
- become_pass:
- description: Password for machinectl
- required: False
- vars:
- - name: ansible_become_password
- - name: ansible_become_pass
- - name: ansible_machinectl_pass
- env:
- - name: ANSIBLE_BECOME_PASS
- - name: ANSIBLE_MACHINECTL_PASS
- ini:
- - section: machinectl_become_plugin
- key: password
-'''
-
-from ansible.plugins.become import BecomeBase
-
-
-class BecomeModule(BecomeBase):
-
- name = 'community.general.machinectl'
-
- def build_become_command(self, cmd, shell):
- super(BecomeModule, self).build_become_command(cmd, shell)
-
- if not cmd:
- return cmd
-
- become = self.get_option('become_exe')
-
- flags = self.get_option('become_flags')
- user = self.get_option('become_user')
- return '%s -q shell %s %s@ %s' % (become, flags, user, cmd)
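The wrapper built above is a single format string; a standalone sketch of the composition (values are illustrative) makes the resulting command line explicit:

def build_machinectl_command(cmd, become_exe='machinectl', flags='', user='root'):
    """Wrap a shell command so machinectl runs it as `user`."""
    if not cmd:
        return cmd
    return '%s -q shell %s %s@ %s' % (become_exe, flags, user, cmd)

# build_machinectl_command('whoami', user='deploy')
# -> 'machinectl -q shell  deploy@ whoami'  (double space when flags is empty)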
diff --git a/ansible_collections/community/general/plugins/cache/redis.py b/ansible_collections/community/general/plugins/cache/redis.py
deleted file mode 100644
index 3c73d8b5..00000000
--- a/ansible_collections/community/general/plugins/cache/redis.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2014, Brian Coca, Josh Drake, et al
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: redis
- short_description: Use Redis DB for cache
- description:
- - This cache uses JSON formatted, per-host records saved in Redis.
- requirements:
- - redis>=2.4.5 (python lib)
- options:
- _uri:
- description:
- - A colon separated string of connection information for Redis.
- - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
- - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
- - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
- required: True
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the DB entries
- default: ansible_facts
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _keyset_name:
- description: User defined name for the cache keyset.
- default: ansible_cache_keys
- env:
- - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
- ini:
- - key: fact_caching_redis_keyset_name
- section: defaults
- version_added: 1.3.0
- _sentinel_service_name:
- description: The redis sentinel service name (also referred to as the cluster name).
- env:
- - name: ANSIBLE_CACHE_REDIS_SENTINEL
- ini:
- - key: fact_caching_redis_sentinel
- section: defaults
- version_added: 1.3.0
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
- type: integer
-'''
-
-import re
-import time
-import json
-
-from ansible import constants as C
-from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_native
-from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
-from ansible.plugins.cache import BaseCacheModule
-from ansible.release import __version__ as ansible_base_version
-from ansible.utils.display import Display
-
-try:
- from redis import StrictRedis, VERSION
- HAS_REDIS = True
-except ImportError:
- HAS_REDIS = False
-
-display = Display()
-
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by redis.
-
- Keys are maintained in a zset with their score being the timestamp
- when they are inserted. This allows for the usage of 'zremrangebyscore'
- to expire keys. This mechanism is used instead of a pattern-matched
- 'scan' for performance.
- """
- _sentinel_service_name = None
- re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
- re_sent_conn = re.compile(r'^(.*):(\d+)$')
-
- def __init__(self, *args, **kwargs):
- uri = ''
-
- try:
- super(CacheModule, self).__init__(*args, **kwargs)
- if self.get_option('_uri'):
- uri = self.get_option('_uri')
- self._timeout = float(self.get_option('_timeout'))
- self._prefix = self.get_option('_prefix')
- self._keys_set = self.get_option('_keyset_name')
- self._sentinel_service_name = self.get_option('_sentinel_service_name')
- except KeyError:
- # TODO: remove once we no longer support Ansible 2.9
- if not ansible_base_version.startswith('2.9.'):
- raise AnsibleError("Do not import CacheModules directly. Use ansible.plugins.loader.cache_loader instead.")
- if C.CACHE_PLUGIN_CONNECTION:
- uri = C.CACHE_PLUGIN_CONNECTION
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._prefix = C.CACHE_PLUGIN_PREFIX
- self._keys_set = 'ansible_cache_keys'
-
- if not HAS_REDIS:
- raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
-
- self._cache = {}
- kw = {}
-
- # tls connection
- tlsprefix = 'tls://'
- if uri.startswith(tlsprefix):
- kw['ssl'] = True
- uri = uri[len(tlsprefix):]
-
- # redis sentinel connection
- if self._sentinel_service_name:
- self._db = self._get_sentinel_connection(uri, kw)
- # normal connection
- else:
- connection = self._parse_connection(self.re_url_conn, uri)
- self._db = StrictRedis(*connection, **kw)
-
- display.vv('Redis connection: %s' % self._db)
-
- @staticmethod
- def _parse_connection(re_patt, uri):
- match = re_patt.match(uri)
- if not match:
- raise AnsibleError("Unable to parse connection string")
- return match.groups()
-
- def _get_sentinel_connection(self, uri, kw):
- """
- get sentinel connection details from _uri
- """
- try:
- from redis.sentinel import Sentinel
- except ImportError:
- raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
-
- if ';' not in uri:
- raise AnsibleError('_uri does not have sentinel syntax.')
-
- # format: "localhost:26379;localhost2:26379;0:changeme"
- connections = uri.split(';')
- connection_args = connections.pop(-1)
- if len(connection_args) > 0: # handle the case where no db number is given
- connection_args = connection_args.split(':')
- kw['db'] = connection_args.pop(0)
- try:
- kw['password'] = connection_args.pop(0)
- except IndexError:
- pass # password is optional
-
- sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
- display.vv('\nUsing redis sentinels: %s' % sentinels)
- scon = Sentinel(sentinels, **kw)
- try:
- return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
- except Exception as exc:
- raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
-
- def _make_key(self, key):
- return self._prefix + key
-
- def get(self, key):
-
- if key not in self._cache:
- value = self._db.get(self._make_key(key))
- # guard against the key not being removed from the zset;
- # this could happen in cases where the timeout value is changed
- # between invocations
- if value is None:
- self.delete(key)
- raise KeyError
- self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder)
-
- return self._cache.get(key)
-
- def set(self, key, value):
-
- value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
- if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
- self._db.setex(self._make_key(key), int(self._timeout), value2)
- else:
- self._db.set(self._make_key(key), value2)
-
- if VERSION[0] == 2:
- self._db.zadd(self._keys_set, time.time(), key)
- else:
- self._db.zadd(self._keys_set, {key: time.time()})
- self._cache[key] = value
-
- def _expire_keys(self):
- if self._timeout > 0:
- expiry_age = time.time() - self._timeout
- self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
-
- def keys(self):
- self._expire_keys()
- return self._db.zrange(self._keys_set, 0, -1)
-
- def contains(self, key):
- self._expire_keys()
- return (self._db.zrank(self._keys_set, key) is not None)
-
- def delete(self, key):
- if key in self._cache:
- del self._cache[key]
- self._db.delete(self._make_key(key))
- self._db.zrem(self._keys_set, key)
-
- def flush(self):
- for key in list(self.keys()):
- self.delete(key)
-
- def copy(self):
- # TODO: there is probably a better way to do this in redis
- ret = dict([(k, self.get(k)) for k in self.keys()])
- return ret
-
- def __getstate__(self):
- return dict()
-
- def __setstate__(self, data):
- self.__init__()
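The zset bookkeeping described in the class docstring can be tried in isolation with redis-py 3.x; a sketch under that assumption (key names mirror the defaults above, and the value is expected to be an already-serialized string):

import time

import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)
KEYSET = 'ansible_cache_keys'
PREFIX = 'ansible_facts'
TIMEOUT = 86400  # seconds

def cache_set(key, serialized_value):
    # Store the value with a TTL and record its insertion time in the zset.
    r.setex(PREFIX + key, TIMEOUT, serialized_value)
    r.zadd(KEYSET, {key: time.time()})

def cache_keys():
    # Drop zset members older than the timeout, then list the survivors.
    r.zremrangebyscore(KEYSET, 0, time.time() - TIMEOUT)
    return r.zrange(KEYSET, 0, -1)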
diff --git a/ansible_collections/community/general/plugins/cache/yaml.py b/ansible_collections/community/general/plugins/cache/yaml.py
deleted file mode 100644
index e5062b16..00000000
--- a/ansible_collections/community/general/plugins/cache/yaml.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, Brian Coca
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: yaml
- short_description: YAML formatted files.
- description:
- - This cache uses YAML formatted, per-host files saved to the filesystem.
- author: Brian Coca (@bcoca)
- options:
- _uri:
- required: True
- description:
- - Path in which the cache plugin will save the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
- ini:
- - key: fact_caching_connection
- section: defaults
- _prefix:
- description: User defined prefix to use when creating the files
- env:
- - name: ANSIBLE_CACHE_PLUGIN_PREFIX
- ini:
- - key: fact_caching_prefix
- section: defaults
- _timeout:
- default: 86400
- description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
- env:
- - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
- ini:
- - key: fact_caching_timeout
- section: defaults
- type: integer
-'''
-
-
-import codecs
-
-import yaml
-
-from ansible.parsing.yaml.loader import AnsibleLoader
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.plugins.cache import BaseFileCacheModule
-
-
-class CacheModule(BaseFileCacheModule):
- """
- A caching module backed by yaml files.
- """
-
- def _load(self, filepath):
- with codecs.open(filepath, 'r', encoding='utf-8') as f:
- return AnsibleLoader(f).get_single_data()
-
- def _dump(self, value, filepath):
- with codecs.open(filepath, 'w', encoding='utf-8') as f:
- yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
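The same round trip works with plain PyYAML when the Ansible loader/dumper are not available; a minimal sketch (the file path is illustrative):

import codecs

import yaml

def dump_facts(value, filepath):
    with codecs.open(filepath, 'w', encoding='utf-8') as f:
        yaml.safe_dump(value, f, default_flow_style=False)

def load_facts(filepath):
    with codecs.open(filepath, 'r', encoding='utf-8') as f:
        return yaml.safe_load(f)

facts = {'ansible_distribution': 'Debian'}
dump_facts(facts, '/tmp/host1.yml')
assert load_facts('/tmp/host1.yml') == facts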
diff --git a/ansible_collections/community/general/plugins/callback/hipchat.py b/ansible_collections/community/general/plugins/callback/hipchat.py
deleted file mode 100644
index c64b892d..00000000
--- a/ansible_collections/community/general/plugins/callback/hipchat.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# -*- coding: utf-8 -*-
-# (C) 2014, Matt Martz
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: hipchat
- type: notification
- requirements:
- - whitelist in configuration.
- - prettytable (python lib)
- short_description: post task events to hipchat
- description:
- - This callback plugin sends status updates to a HipChat channel during playbook execution.
- - Before 2.4 only environment variables were available for configuring this plugin.
- options:
- token:
- description: HipChat API token for v1 or v2 API.
- required: True
- env:
- - name: HIPCHAT_TOKEN
- ini:
- - section: callback_hipchat
- key: token
- api_version:
- description: HipChat API version, v1 or v2.
- required: False
- default: v1
- env:
- - name: HIPCHAT_API_VERSION
- ini:
- - section: callback_hipchat
- key: api_version
- room:
- description: HipChat room to post in.
- default: ansible
- env:
- - name: HIPCHAT_ROOM
- ini:
- - section: callback_hipchat
- key: room
- from:
- description: Name to post as
- default: ansible
- env:
- - name: HIPCHAT_FROM
- ini:
- - section: callback_hipchat
- key: from
- notify:
- description: Add notify flag to important messages
- type: bool
- default: True
- env:
- - name: HIPCHAT_NOTIFY
- ini:
- - section: callback_hipchat
- key: notify
-
-'''
-
-import os
-import json
-
-try:
- import prettytable
- HAS_PRETTYTABLE = True
-except ImportError:
- HAS_PRETTYTABLE = False
-
-from ansible.plugins.callback import CallbackBase
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.urls import open_url
-
-
-class CallbackModule(CallbackBase):
- """This is an example ansible callback plugin that sends status
- updates to a HipChat channel during playbook execution.
- """
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.hipchat'
- CALLBACK_NEEDS_WHITELIST = True
-
- API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
- API_V2_URL = 'https://api.hipchat.com/v2/'
-
- def __init__(self):
-
- super(CallbackModule, self).__init__()
-
- if not HAS_PRETTYTABLE:
- self.disabled = True
- self._display.warning('The `prettytable` python module is not installed. '
- 'Disabling the HipChat callback plugin.')
- self.printed_playbook = False
- self.playbook_name = None
- self.play = None
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- self.token = self.get_option('token')
- self.api_version = self.get_option('api_version')
- self.from_name = self.get_option('from')
- self.allow_notify = self.get_option('notify')
- self.room = self.get_option('room')
-
- if self.token is None:
- self.disabled = True
- self._display.warning('HipChat token could not be loaded. The HipChat '
- 'token can be provided using the `HIPCHAT_TOKEN` '
- 'environment variable.')
-
- # Pick the request handler.
- if self.api_version == 'v2':
- self.send_msg = self.send_msg_v2
- else:
- self.send_msg = self.send_msg_v1
-
- def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
-
- body = {}
- body['room_id'] = self.room
- body['from'] = self.from_name[:15] # max length is 15
- body['message'] = msg
- body['message_format'] = msg_format
- body['color'] = color
- body['notify'] = self.allow_notify and notify
-
- data = json.dumps(body)
- url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
- try:
- response = open_url(url, data=data, headers=headers, method='POST')
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
- """Method for sending a message to HipChat"""
-
- params = {}
- params['room_id'] = self.room
- params['from'] = self.from_name[:15] # max length is 15
- params['message'] = msg
- params['message_format'] = msg_format
- params['color'] = color
- params['notify'] = int(self.allow_notify and notify)
-
- url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
- try:
- response = open_url(url, data=urlencode(params))
- return response.read()
- except Exception as ex:
- self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
-
- def v2_playbook_on_play_start(self, play):
- """Display Playbook and play start messages"""
-
- self.play = play
- name = play.name
- # This block sends information about a playbook when it starts
- # The playbook object is not immediately available at
- # playbook_on_start so we grab it via the play
- #
- # Displays info about playbook being started by a person on an
- # inventory, as well as Tags, Skip Tags and Limits
- if not self.printed_playbook:
- self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
- host_list = self.play.playbook.inventory.host_list
- inventory = os.path.basename(os.path.realpath(host_list))
- self.send_msg("%s: Playbook initiated by %s against %s" %
- (self.playbook_name,
- self.play.playbook.remote_user,
- inventory), notify=True)
- self.printed_playbook = True
- subset = self.play.playbook.inventory._subset
- skip_tags = self.play.playbook.skip_tags
- self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
- (self.playbook_name,
- ', '.join(self.play.playbook.only_tags),
- ', '.join(skip_tags) if skip_tags else None,
- ', '.join(subset) if subset else subset))
-
- # This is where we actually say we are starting a play
- self.send_msg("%s: Starting play: %s" %
- (self.playbook_name, name))
-
- def playbook_on_stats(self, stats):
- """Display info about playbook statistics"""
- hosts = sorted(stats.processed.keys())
-
- t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
- 'Failures'])
-
- failures = False
- unreachable = False
-
- for h in hosts:
- s = stats.summarize(h)
-
- if s['failures'] > 0:
- failures = True
- if s['unreachable'] > 0:
- unreachable = True
-
- t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
- 'failures']])
-
- self.send_msg("%s: Playbook complete" % self.playbook_name,
- notify=True)
-
- if failures or unreachable:
- color = 'red'
- self.send_msg("%s: Failures detected" % self.playbook_name,
- color=color, notify=True)
- else:
- color = 'green'
-
- self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
diff --git a/ansible_collections/community/general/plugins/callback/jabber.py b/ansible_collections/community/general/plugins/callback/jabber.py
deleted file mode 100644
index b535fa95..00000000
--- a/ansible_collections/community/general/plugins/callback/jabber.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: jabber
- type: notification
- short_description: post task events to a jabber server
- description:
- - The chatty part of ChatOps, with a Jabber (XMPP) server as a target
- - This callback plugin sends status updates to a Jabber recipient during playbook execution.
- requirements:
- - xmpp (python lib https://github.com/ArchipelProject/xmpppy)
- options:
- server:
- description: connection info to jabber server
- required: True
- env:
- - name: JABBER_SERV
- user:
- description: Jabber user to authenticate as
- required: True
- env:
- - name: JABBER_USER
- password:
- description: Password for the user to the jabber server
- required: True
- env:
- - name: JABBER_PASS
- to:
- description: chat identifier that will receive the message
- required: True
- env:
- - name: JABBER_TO
-'''
-
-import os
-
-HAS_XMPP = True
-try:
- import xmpp
-except ImportError:
- HAS_XMPP = False
-
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.jabber'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self, display=None):
-
- super(CallbackModule, self).__init__(display=display)
-
- if not HAS_XMPP:
- self._display.warning("The required python xmpp library (xmpppy) is not installed. "
- "pip install git+https://github.com/ArchipelProject/xmpppy")
- self.disabled = True
-
- self.serv = os.getenv('JABBER_SERV')
- self.j_user = os.getenv('JABBER_USER')
- self.j_pass = os.getenv('JABBER_PASS')
- self.j_to = os.getenv('JABBER_TO')
-
- # disable unless every required connection setting is present
- if not all((self.serv, self.j_user, self.j_pass, self.j_to)):
- self.disabled = True
- self._display.warning('The Jabber callback requires the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables')
-
- def send_msg(self, msg):
- """Send message"""
- jid = xmpp.JID(self.j_user)
- client = xmpp.Client(self.serv, debug=[])
- client.connect(server=(self.serv, 5222))
- client.auth(jid.getNode(), self.j_pass, resource=jid.getResource())
- message = xmpp.Message(self.j_to, msg)
- message.setAttr('type', 'chat')
- client.send(message)
- client.disconnect()
-
- def v2_runner_on_ok(self, result):
- self._clean_results(result._result, result._task.action)
- self.debug = self._dump_results(result._result)
-
- def v2_playbook_on_task_start(self, task, is_conditional):
- self.task = task
-
- def v2_playbook_on_play_start(self, play):
- """Display Playbook and play start messages"""
- self.play = play
- name = play.name
- self.send_msg("Ansible starting play: %s" % (name))
-
- def playbook_on_stats(self, stats):
- name = self.play
- hosts = sorted(stats.processed.keys())
- failures = False
- unreachable = False
- for h in hosts:
- s = stats.summarize(h)
- if s['failures'] > 0:
- failures = True
- if s['unreachable'] > 0:
- unreachable = True
-
- if failures or unreachable:
- out = self.debug
- self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
- else:
- out = self.debug
- self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
diff --git a/ansible_collections/community/general/plugins/callback/logentries.py b/ansible_collections/community/general/plugins/callback/logentries.py
deleted file mode 100644
index ad71a6d4..00000000
--- a/ansible_collections/community/general/plugins/callback/logentries.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Logentries.com, Jimmy Tang
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: logentries
- type: notification
- short_description: Sends events to Logentries
- description:
- - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
- - Before 2.4, if you wanted to use an ini configuration, the file had to be placed in the same directory as this plugin and named logentries.ini
- - In 2.4 and above you can just put it in the main Ansible configuration file.
- requirements:
- - whitelisting in configuration
- - certifi (python library)
- - flatdict (python library), if you want to use the 'flatten' option
- options:
- api:
- description: URI to the Logentries API
- env:
- - name: LOGENTRIES_API
- default: data.logentries.com
- ini:
- - section: callback_logentries
- key: api
- port:
- description: HTTP port to use when connecting to the API
- env:
- - name: LOGENTRIES_PORT
- default: 80
- ini:
- - section: callback_logentries
- key: port
- tls_port:
- description: Port to use when connecting to the API when TLS is enabled
- env:
- - name: LOGENTRIES_TLS_PORT
- default: 443
- ini:
- - section: callback_logentries
- key: tls_port
- token:
- description: The logentries "TCP token"
- env:
- - name: LOGENTRIES_ANSIBLE_TOKEN
- required: True
- ini:
- - section: callback_logentries
- key: token
- use_tls:
- description:
- - Toggle to decide whether to use TLS to encrypt the communications with the API server
- env:
- - name: LOGENTRIES_USE_TLS
- default: False
- type: boolean
- ini:
- - section: callback_logentries
- key: use_tls
- flatten:
- description: flatten complex data structures into a single dictionary with complex keys
- type: boolean
- default: False
- env:
- - name: LOGENTRIES_FLATTEN
- ini:
- - section: callback_logentries
- key: flatten
-'''
-
-EXAMPLES = '''
-examples: >
- To enable, add this to your ansible.cfg file in the defaults block
-
- [defaults]
- callback_whitelist = community.general.logentries
-
- Either set the environment variables
- export LOGENTRIES_API=data.logentries.com
- export LOGENTRIES_PORT=10000
- export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
-
- Or in the main Ansible config file
- [callback_logentries]
- api = data.logentries.com
- port = 10000
- tls_port = 20000
- use_tls = no
- token = dd21fc88-f00a-43ff-b977-e3a4233c53af
- flatten = False
-'''
-
-import os
-import socket
-import random
-import time
-import uuid
-
-try:
- import certifi
- HAS_CERTIFI = True
-except ImportError:
- HAS_CERTIFI = False
-
-try:
- import flatdict
- HAS_FLATDICT = True
-except ImportError:
- HAS_FLATDICT = False
-
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.plugins.callback import CallbackBase
-
-# Todo:
-# * Better formatting of output before sending out to logentries data/api nodes.
-
-
-class PlainTextSocketAppender(object):
- def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443):
-
- self.LE_API = LE_API
- self.LE_PORT = LE_PORT
- self.LE_TLS_PORT = LE_TLS_PORT
- self.MIN_DELAY = 0.1
- self.MAX_DELAY = 10
- # Error message displayed when an incorrect Token has been detected
- self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n"
- # Unicode Line separator character \u2028
- self.LINE_SEP = u'\u2028'
-
- self._display = display
- self._conn = None
-
- def open_connection(self):
- self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self._conn.connect((self.LE_API, self.LE_PORT))
-
- def reopen_connection(self):
- self.close_connection()
-
- root_delay = self.MIN_DELAY
- while True:
- try:
- self.open_connection()
- return
- except Exception as e:
- self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
-
- root_delay *= 2
- if root_delay > self.MAX_DELAY:
- root_delay = self.MAX_DELAY
-
- wait_for = root_delay + random.uniform(0, root_delay)
-
- try:
- self._display.vvvv("sleeping %s before retry" % wait_for)
- time.sleep(wait_for)
- except KeyboardInterrupt:
- raise
-
- def close_connection(self):
- if self._conn is not None:
- self._conn.close()
-
- def put(self, data):
- # Replace newlines with Unicode line separator
- # for multi-line events
- data = to_text(data, errors='surrogate_or_strict')
- multiline = data.replace(u'\n', self.LINE_SEP)
- multiline += u"\n"
- # Send data, reconnect if needed
- while True:
- try:
- self._conn.send(to_bytes(multiline, errors='surrogate_or_strict'))
- except socket.error:
- self.reopen_connection()
- continue
- break
-
- self.close_connection()
-
-
-try:
- import ssl
- HAS_SSL = True
-except ImportError: # for systems without TLS support.
- SocketAppender = PlainTextSocketAppender
- HAS_SSL = False
-else:
-
- class TLSSocketAppender(PlainTextSocketAppender):
- def open_connection(self):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock = ssl.wrap_socket(
- sock=sock,
- keyfile=None,
- certfile=None,
- server_side=False,
- cert_reqs=ssl.CERT_REQUIRED,
- ssl_version=getattr(
- ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
- ca_certs=certifi.where(),
- do_handshake_on_connect=True,
- suppress_ragged_eofs=True, )
- sock.connect((self.LE_API, self.LE_TLS_PORT))
- self._conn = sock
-
- SocketAppender = TLSSocketAppender
-
-
-class CallbackModule(CallbackBase):
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.logentries'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self):
-
- # TODO: allow for alternate posting methods (REST/UDP/agent/etc)
- super(CallbackModule, self).__init__()
-
- # verify dependencies
- if not HAS_SSL:
- self._display.warning("Unable to import ssl module. Will send over port 80.")
-
- if not HAS_CERTIFI:
- self.disabled = True
- self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.')
-
- self.le_jobid = str(uuid.uuid4())
-
- # FIXME: make configurable, move to options
- self.timeout = 10
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
-
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- # get options
- try:
- self.api_url = self.get_option('api')
- self.api_port = self.get_option('port')
- self.api_tls_port = self.get_option('tls_port')
- self.use_tls = self.get_option('use_tls')
- self.flatten = self.get_option('flatten')
- except KeyError as e:
- self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
- self.disabled = True
-
- try:
- self.token = self.get_option('token')
- except KeyError as e:
- self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling')
- self.disabled = True
-
- if self.flatten and not HAS_FLATDICT:
- self.disabled = True
- self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.')
-
- self._initialize_connections()
-
- def _initialize_connections(self):
-
- if not self.disabled:
- if self.use_tls:
- self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
- self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
- else:
- self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
- self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
- self._appender.reopen_connection()
-
- def emit_formatted(self, record):
- if self.flatten:
- results = flatdict.FlatDict(record)
- self.emit(self._dump_results(results))
- else:
- self.emit(self._dump_results(record))
-
- def emit(self, record):
- msg = record.rstrip('\n')
- msg = "{0} {1}".format(self.token, msg)
- self._appender.put(msg)
- self._display.vvvv("Sent event to logentries")
-
- def _set_info(self, host, res):
- return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res}
-
- def runner_on_ok(self, host, res):
- results = self._set_info(host, res)
- results['status'] = 'OK'
- self.emit_formatted(results)
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- results = self._set_info(host, res)
- results['status'] = 'FAILED'
- self.emit_formatted(results)
-
- def runner_on_skipped(self, host, item=None):
- results = self._set_info(host, item)
- del results['results']
- results['status'] = 'SKIPPED'
- self.emit_formatted(results)
-
- def runner_on_unreachable(self, host, res):
- results = self._set_info(host, res)
- results['status'] = 'UNREACHABLE'
- self.emit_formatted(results)
-
- def runner_on_async_failed(self, host, res, jid):
- results = self._set_info(host, res)
- results['jid'] = jid
- results['status'] = 'ASYNC_FAILED'
- self.emit_formatted(results)
-
- def v2_playbook_on_play_start(self, play):
- results = {}
- results['le_jobid'] = self.le_jobid
- results['started_by'] = os.getlogin()
- if play.name:
- results['play'] = play.name
- results['hosts'] = play.hosts
- self.emit_formatted(results)
-
- def playbook_on_stats(self, stats):
- """ close connection """
- self._appender.close_connection()
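reopen_connection above implements capped exponential backoff with jitter; the same loop reduced to a standalone sketch (retrying on OSError, which covers socket errors on Python 3):

import random
import time

MIN_DELAY, MAX_DELAY = 0.1, 10

def connect_with_backoff(open_connection):
    delay = MIN_DELAY
    while True:
        try:
            return open_connection()
        except OSError:
            delay = min(delay * 2, MAX_DELAY)             # double, but cap the base delay
            time.sleep(delay + random.uniform(0, delay))  # add jitter to avoid thundering herd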
diff --git a/ansible_collections/community/general/plugins/callback/mail.py b/ansible_collections/community/general/plugins/callback/mail.py
deleted file mode 100644
index 3805bae5..00000000
--- a/ansible_collections/community/general/plugins/callback/mail.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2012, Dag Wieers
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
-name: mail
-type: notification
-short_description: Sends failure events via email
-description:
-- This callback will report failures via email.
-author:
-- Dag Wieers (@dagwieers)
-requirements:
-- whitelisting in configuration
-options:
- mta:
- description:
- - Mail Transfer Agent, the server that accepts SMTP connections.
- type: str
- env:
- - name: SMTPHOST
- ini:
- - section: callback_mail
- key: smtphost
- default: localhost
- mtaport:
- description:
- - Mail Transfer Agent Port.
- - Port on which the SMTP server listens.
- type: int
- ini:
- - section: callback_mail
- key: smtpport
- default: 25
- to:
- description:
- - Mail recipient.
- type: list
- elements: str
- ini:
- - section: callback_mail
- key: to
- default: [root]
- sender:
- description:
- - Mail sender.
- - Note that this will be required from community.general 6.0.0 on.
- type: str
- ini:
- - section: callback_mail
- key: sender
- cc:
- description:
- - CC'd recipients.
- type: list
- elements: str
- ini:
- - section: callback_mail
- key: cc
- bcc:
- description:
- - BCC'd recipients.
- type: list
- elements: str
- ini:
- - section: callback_mail
- key: bcc
-'''
-
-import json
-import os
-import re
-import email.utils
-import smtplib
-
-from ansible.module_utils.six import string_types
-from ansible.module_utils.common.text.converters import to_bytes
-from ansible.parsing.ajson import AnsibleJSONEncoder
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
- ''' This Ansible callback plugin mails errors to interested parties. '''
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.mail'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self, display=None):
- super(CallbackModule, self).__init__(display=display)
- self.sender = None
- self.to = 'root'
- self.smtphost = os.getenv('SMTPHOST', 'localhost')
- self.smtpport = 25
- self.cc = None
- self.bcc = None
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
-
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- self.sender = self.get_option('sender')
- if self.sender is None:
- self._display.deprecated(
- 'The sender for the mail callback has not been specified. This will be an error in the future',
- version='6.0.0', collection_name='community.general')
- self.to = self.get_option('to')
- self.smtphost = self.get_option('mta')
- self.smtpport = self.get_option('mtaport')
- self.cc = self.get_option('cc')
- self.bcc = self.get_option('bcc')
-
- def mail(self, subject='Ansible error mail', body=None):
- if body is None:
- body = subject
-
- smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
-
- sender_address = email.utils.parseaddr(self.sender)
- if self.to:
- to_addresses = email.utils.getaddresses(self.to)
- if self.cc:
- cc_addresses = email.utils.getaddresses(self.cc)
- if self.bcc:
- bcc_addresses = email.utils.getaddresses(self.bcc)
-
- content = 'Date: %s\n' % email.utils.formatdate()
- content += 'From: %s\n' % email.utils.formataddr(sender_address)
- if self.to:
- content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses])
- if self.cc:
- content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses])
- content += 'Message-ID: %s\n' % email.utils.make_msgid()
- content += 'Subject: %s\n\n' % subject.strip()
- content += body
-
- addresses = to_addresses
- if self.cc:
- addresses += cc_addresses
- if self.bcc:
- addresses += bcc_addresses
-
- if not addresses:
- self._display.warning('No receiver has been specified for the mail callback plugin.')
-
- smtp.sendmail(self.sender, [address for name, address in addresses], to_bytes(content))
-
- smtp.quit()
-
- def subject_msg(self, multiline, failtype, linenr):
- return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
-
- def indent(self, multiline, indent=8):
- return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
-
- def body_blob(self, multiline, texttype):
- ''' Turn some text output into a well-indented block for sending in a mail body '''
- intro = 'with the following %s:\n\n' % texttype
- blob = ''
- for line in multiline.strip('\r\n').splitlines():
- blob += '%s\n' % line
- return intro + self.indent(blob) + '\n'
-
- def mail_result(self, result, failtype):
- host = result._host.get_name()
- if not self.sender:
- self.sender = '"Ansible: %s" ' % host
-
- # Add subject
- if self.itembody:
- subject = self.itemsubject
- elif result._result.get('failed_when_result') is True:
- subject = "Failed due to 'failed_when' condition"
- elif result._result.get('msg'):
- subject = self.subject_msg(result._result['msg'], failtype, 0)
- elif result._result.get('stderr'):
- subject = self.subject_msg(result._result['stderr'], failtype, -1)
- elif result._result.get('stdout'):
- subject = self.subject_msg(result._result['stdout'], failtype, -1)
- elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
- subject = self.subject_msg(result._result['exception'], failtype, -1)
- else:
- subject = '%s: %s' % (failtype, result._task.name or result._task.action)
-
- # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
- body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
- if result._task.name:
- body += 'Task: %s\n' % result._task.name
- body += 'Module: %s\n' % result._task.action
- body += 'Host: %s\n' % host
- body += '\n'
-
- # Add task information (as much as possible)
- body += 'The following task failed:\n\n'
- if 'invocation' in result._result:
- body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
- elif result._task.name:
- body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
- else:
- body += self.indent('%s\n' % result._task.action)
- body += '\n'
-
- # Add item / message
- if self.itembody:
- body += self.itembody
- elif result._result.get('failed_when_result') is True:
- body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
- elif result._result.get('msg'):
- body += self.body_blob(result._result['msg'], 'message')
-
- # Add stdout / stderr / exception / warnings / deprecations
- if result._result.get('stdout'):
- body += self.body_blob(result._result['stdout'], 'standard output')
- if result._result.get('stderr'):
- body += self.body_blob(result._result['stderr'], 'error output')
- if result._result.get('exception'): # Unrelated exceptions are added to output :-/
- body += self.body_blob(result._result['exception'], 'exception')
- if result._result.get('warnings'):
- for i in range(len(result._result.get('warnings'))):
- body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
- if result._result.get('deprecations'):
- for i in range(len(result._result.get('deprecations'))):
- body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
-
- body += 'and a complete dump of the error:\n\n'
- body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
-
- self.mail(subject=subject, body=body)
-
- def v2_playbook_on_start(self, playbook):
- self.playbook = playbook
- self.itembody = ''
-
- def v2_runner_on_failed(self, result, ignore_errors=False):
- if ignore_errors:
- return
-
- self.mail_result(result, 'Failed')
-
- def v2_runner_on_unreachable(self, result):
- self.mail_result(result, 'Unreachable')
-
- def v2_runner_on_async_failed(self, result):
- self.mail_result(result, 'Async failure')
-
- def v2_runner_item_on_failed(self, result):
- # Pass item information to task failure
- self.itemsubject = result._result['msg']
- self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
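The mail() method hand-assembles RFC 5322 headers and passes the result to smtplib; a self-contained sketch of that flow, with placeholder host and addresses:

import email.utils
import smtplib

def send_report(sender, recipients, subject, body, host='localhost', port=25):
    content = 'Date: %s\n' % email.utils.formatdate()
    content += 'From: %s\n' % sender
    content += 'To: %s\n' % ', '.join(recipients)
    content += 'Message-ID: %s\n' % email.utils.make_msgid()
    content += 'Subject: %s\n\n' % subject.strip()
    content += body

    smtp = smtplib.SMTP(host, port=port)
    try:
        smtp.sendmail(sender, recipients, content.encode('utf-8'))
    finally:
        smtp.quit()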
diff --git a/ansible_collections/community/general/plugins/callback/null.py b/ansible_collections/community/general/plugins/callback/null.py
deleted file mode 100644
index 13ea65b4..00000000
--- a/ansible_collections/community/general/plugins/callback/null.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: 'null'
- type: stdout
- requirements:
- - set as main display callback
- short_description: Don't display stuff to screen
- description:
- - This callback prevents outputting events to the screen
-'''
-
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
-
- '''
- This callback won't print messages to stdout when new callback events are received.
- '''
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
- CALLBACK_NAME = 'community.general.null'
diff --git a/ansible_collections/community/general/plugins/callback/osx_say.py b/ansible_collections/community/general/plugins/callback/osx_say.py
deleted file mode 120000
index f080521d..00000000
--- a/ansible_collections/community/general/plugins/callback/osx_say.py
+++ /dev/null
@@ -1 +0,0 @@
-say.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/callback/say.py b/ansible_collections/community/general/plugins/callback/say.py
deleted file mode 100644
index 8d67e433..00000000
--- a/ansible_collections/community/general/plugins/callback/say.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2012, Michael DeHaan,
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: say
- type: notification
- requirements:
- - whitelisting in configuration
- - the '/usr/bin/say' command line program (standard on macOS) or 'espeak' command line program
- short_description: notify using software speech synthesizer
- description:
- - This plugin will use the 'say' or 'espeak' program to "speak" about play events.
- notes:
- - In 2.8, this callback has been renamed from C(osx_say) into M(community.general.say).
-'''
-
-import platform
-import subprocess
-import os
-
-from ansible.module_utils.common.process import get_bin_path
-from ansible.plugins.callback import CallbackBase
-
-
-class CallbackModule(CallbackBase):
- """
- makes Ansible much more exciting.
- """
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.say'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self):
-
- super(CallbackModule, self).__init__()
-
- self.FAILED_VOICE = None
- self.REGULAR_VOICE = None
- self.HAPPY_VOICE = None
- self.LASER_VOICE = None
-
- try:
- self.synthesizer = get_bin_path('say')
- if platform.system() != 'Darwin':
- # 'say' binary is available, but it might be the GNUstep tool, which doesn't support the 'voice' parameter
- self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
- else:
- self.FAILED_VOICE = 'Zarvox'
- self.REGULAR_VOICE = 'Trinoids'
- self.HAPPY_VOICE = 'Cellos'
- self.LASER_VOICE = 'Princess'
- except ValueError:
- try:
- self.synthesizer = get_bin_path('espeak')
- self.FAILED_VOICE = 'klatt'
- self.HAPPY_VOICE = 'f5'
- self.LASER_VOICE = 'whisper'
- except ValueError:
- self.synthesizer = None
-
- # the plugin disables itself if neither 'say' nor 'espeak' is present
- # ansible will not call any callback if disabled is set to True
- if not self.synthesizer:
- self.disabled = True
- self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
-
- def say(self, msg, voice):
- cmd = [self.synthesizer, msg]
- if voice:
- cmd.extend(('-v', voice))
- subprocess.call(cmd)
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
-
- def runner_on_ok(self, host, res):
- self.say("pew", self.LASER_VOICE)
-
- def runner_on_skipped(self, host, item=None):
- self.say("pew", self.LASER_VOICE)
-
- def runner_on_unreachable(self, host, res):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
-
- def runner_on_async_ok(self, host, res, jid):
- self.say("pew", self.LASER_VOICE)
-
- def runner_on_async_failed(self, host, res, jid):
- self.say("Failure on host %s" % host, self.FAILED_VOICE)
-
- def playbook_on_start(self):
- self.say("Running Playbook", self.REGULAR_VOICE)
-
- def playbook_on_notify(self, host, handler):
- self.say("pew", self.LASER_VOICE)
-
- def playbook_on_task_start(self, name, is_conditional):
- if not is_conditional:
- self.say("Starting task: %s" % name, self.REGULAR_VOICE)
- else:
- self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
-
- def playbook_on_setup(self):
- self.say("Gathering facts", self.REGULAR_VOICE)
-
- def playbook_on_play_start(self, name):
- self.say("Starting play: %s" % name, self.HAPPY_VOICE)
-
- def playbook_on_stats(self, stats):
- self.say("Play complete", self.HAPPY_VOICE)
diff --git a/ansible_collections/community/general/plugins/callback/slack.py b/ansible_collections/community/general/plugins/callback/slack.py
deleted file mode 100644
index 5cb402b1..00000000
--- a/ansible_collections/community/general/plugins/callback/slack.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-# (C) 2014-2015, Matt Martz
-# (C) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: slack
- type: notification
- requirements:
- - whitelist in configuration
- - prettytable (python library)
- short_description: Sends play events to a Slack channel
- description:
- - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
- - Before 2.4 only environment variables were available for configuring this plugin
- options:
- webhook_url:
- required: True
- description: Slack Webhook URL
- env:
- - name: SLACK_WEBHOOK_URL
- ini:
- - section: callback_slack
- key: webhook_url
- channel:
- default: "#ansible"
- description: Slack room to post in.
- env:
- - name: SLACK_CHANNEL
- ini:
- - section: callback_slack
- key: channel
- username:
- description: Username to post as.
- env:
- - name: SLACK_USERNAME
- default: ansible
- ini:
- - section: callback_slack
- key: username
- validate_certs:
- description: validate the SSL certificate of the Slack server. (For HTTPS URLs)
- env:
- - name: SLACK_VALIDATE_CERTS
- ini:
- - section: callback_slack
- key: validate_certs
- default: True
- type: bool
-'''
-
-import json
-import os
-import uuid
-
-from ansible import context
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.urls import open_url
-from ansible.plugins.callback import CallbackBase
-
-try:
- import prettytable
- HAS_PRETTYTABLE = True
-except ImportError:
- HAS_PRETTYTABLE = False
-
-
-class CallbackModule(CallbackBase):
- """This is an ansible callback plugin that sends status
- updates to a Slack channel during playbook execution.
- """
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'notification'
- CALLBACK_NAME = 'community.general.slack'
- CALLBACK_NEEDS_WHITELIST = True
-
- def __init__(self, display=None):
-
- super(CallbackModule, self).__init__(display=display)
-
- if not HAS_PRETTYTABLE:
- self.disabled = True
- self._display.warning('The `prettytable` python module is not '
- 'installed. Disabling the Slack callback '
- 'plugin.')
-
- self.playbook_name = None
-
- # This is a 6 character identifier provided with each message
- # This makes it easier to correlate messages when more than one
- # playbook is running simultaneously
- self.guid = uuid.uuid4().hex[:6]
-
- def set_options(self, task_keys=None, var_options=None, direct=None):
-
- super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
-
- self.webhook_url = self.get_option('webhook_url')
- self.channel = self.get_option('channel')
- self.username = self.get_option('username')
- self.show_invocation = (self._display.verbosity > 1)
- self.validate_certs = self.get_option('validate_certs')
-
- if self.webhook_url is None:
- self.disabled = True
- self._display.warning('Slack Webhook URL was not provided. The '
- 'Slack Webhook URL can be provided using '
- 'the `SLACK_WEBHOOK_URL` environment '
- 'variable.')
-
- def send_msg(self, attachments):
- headers = {
- 'Content-type': 'application/json',
- }
-
- payload = {
- 'channel': self.channel,
- 'username': self.username,
- 'attachments': attachments,
- 'parse': 'none',
- 'icon_url': ('https://cdn2.hubspot.net/hub/330046/'
- 'file-449187601-png/ansible_badge.png'),
- }
-
- data = json.dumps(payload)
- self._display.debug(data)
- self._display.debug(self.webhook_url)
- try:
- response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
- headers=headers)
- return response.read()
- except Exception as e:
- self._display.warning(u'Could not submit message to Slack: %s' %
- to_text(e))
-
- def v2_playbook_on_start(self, playbook):
- self.playbook_name = os.path.basename(playbook._file_name)
-
- title = [
- '*Playbook initiated* (_%s_)' % self.guid
- ]
-
- invocation_items = []
- if context.CLIARGS and self.show_invocation:
- tags = context.CLIARGS['tags']
- skip_tags = context.CLIARGS['skip_tags']
- extra_vars = context.CLIARGS['extra_vars']
- subset = context.CLIARGS['subset']
- inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
-
- invocation_items.append('Inventory: %s' % ', '.join(inventory))
- if tags and tags != ['all']:
- invocation_items.append('Tags: %s' % ', '.join(tags))
- if skip_tags:
- invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
- if subset:
- invocation_items.append('Limit: %s' % subset)
- if extra_vars:
- invocation_items.append('Extra Vars: %s' %
- ' '.join(extra_vars))
-
- title.append('by *%s*' % context.CLIARGS['remote_user'])
-
- title.append('\n\n*%s*' % self.playbook_name)
- msg_items = [' '.join(title)]
- if invocation_items:
- msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
-
- msg = '\n'.join(msg_items)
-
- attachments = [{
- 'fallback': msg,
- 'fields': [
- {
- 'value': msg
- }
- ],
- 'color': 'warning',
- 'mrkdwn_in': ['text', 'fallback', 'fields'],
- }]
-
- self.send_msg(attachments=attachments)
-
- def v2_playbook_on_play_start(self, play):
- """Display Play start messages"""
-
- name = play.name or 'Play name not specified (%s)' % play._uuid
- msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
- attachments = [
- {
- 'fallback': msg,
- 'text': msg,
- 'color': 'warning',
- 'mrkdwn_in': ['text', 'fallback', 'fields'],
- }
- ]
- self.send_msg(attachments=attachments)
-
- def v2_playbook_on_stats(self, stats):
- """Display info about playbook statistics"""
-
- hosts = sorted(stats.processed.keys())
-
- t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
- 'Failures', 'Rescued', 'Ignored'])
-
- failures = False
- unreachable = False
-
- for h in hosts:
- s = stats.summarize(h)
-
- if s['failures'] > 0:
- failures = True
- if s['unreachable'] > 0:
- unreachable = True
-
- t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
- 'failures', 'rescued', 'ignored']])
-
- attachments = []
- msg_items = [
- '*Playbook Complete* (_%s_)' % self.guid
- ]
- if failures or unreachable:
- color = 'danger'
- msg_items.append('\n*Failed!*')
- else:
- color = 'good'
- msg_items.append('\n*Success!*')
-
- msg_items.append('```\n%s\n```' % t)
-
- msg = '\n'.join(msg_items)
-
- attachments.append({
- 'fallback': msg,
- 'fields': [
- {
- 'value': msg
- }
- ],
- 'color': color,
- 'mrkdwn_in': ['text', 'fallback', 'fields']
- })
-
- self.send_msg(attachments=attachments)
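send_msg above is a plain JSON POST to an incoming-webhook URL; the same call with only the standard library (the webhook URL is a placeholder):

import json
from urllib.request import Request, urlopen

def post_to_slack(webhook_url, text, channel='#ansible', username='ansible'):
    payload = {'channel': channel, 'username': username,
               'attachments': [{'fallback': text, 'text': text,
                                'mrkdwn_in': ['text', 'fallback']}]}
    req = Request(webhook_url, data=json.dumps(payload).encode('utf-8'),
                  headers={'Content-type': 'application/json'})
    return urlopen(req).read()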
diff --git a/ansible_collections/community/general/plugins/callback/yaml.py b/ansible_collections/community/general/plugins/callback/yaml.py
deleted file mode 100644
index 59fb3509..00000000
--- a/ansible_collections/community/general/plugins/callback/yaml.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: yaml
- type: stdout
- short_description: yaml-ized Ansible screen output
- description:
- - Ansible output that can be quite a bit easier to read than the
- default JSON formatting.
- extends_documentation_fragment:
- - default_callback
- requirements:
- - set as stdout in configuration
-'''
-
-import yaml
-import json
-import re
-import string
-import sys
-
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.module_utils.six import string_types
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
-from ansible.plugins.callback.default import CallbackModule as Default
-
-
-# from http://stackoverflow.com/a/15423007/115478
-def should_use_block(value):
- """Returns true if string should be in block format"""
- for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
- if c in value:
- return True
- return False
-
-
-class MyDumper(AnsibleDumper):
- def represent_scalar(self, tag, value, style=None):
- """Uses block style for multi-line strings"""
- if style is None:
- if should_use_block(value):
- style = '|'
- # we care more about readable than accuracy, so...
- # ...no trailing space
- value = value.rstrip()
- # ...and non-printable characters
- value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
- # ...tabs prevent blocks from expanding
- value = value.expandtabs()
- # ...and odd bits of whitespace
- value = re.sub(r'[\x0b\x0c\r]', '', value)
- # ...as does trailing space
- value = re.sub(r' +\n', '\n', value)
- else:
- style = self.default_style
- node = yaml.representer.ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
-
-class CallbackModule(Default):
-
- """
- Variation of the Default output which uses nicely readable YAML instead
- of JSON for printing results.
- """
-
- CALLBACK_VERSION = 2.0
- CALLBACK_TYPE = 'stdout'
- CALLBACK_NAME = 'community.general.yaml'
-
- def __init__(self):
- super(CallbackModule, self).__init__()
-
- def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
- if result.get('_ansible_no_log', False):
- return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
-
- # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
- abridged_result = strip_internal_keys(module_response_deepcopy(result))
-
- # remove invocation unless specifically wanting it
- if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
- del abridged_result['invocation']
-
- # remove diff information from screen output
- if self._display.verbosity < 3 and 'diff' in result:
- del abridged_result['diff']
-
- # remove exception from screen output
- if 'exception' in abridged_result:
- del abridged_result['exception']
-
- dumped = ''
-
- # put changed and skipped into a header line
- if 'changed' in abridged_result:
- dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
- del abridged_result['changed']
-
- if 'skipped' in abridged_result:
- dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
- del abridged_result['skipped']
-
- # if we already have stdout, we don't need stdout_lines
- if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
- abridged_result['stdout_lines'] = ''
-
- # if we already have stderr, we don't need stderr_lines
- if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
- abridged_result['stderr_lines'] = ''
-
- if abridged_result:
- dumped += '\n'
- dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
-
- # indent by a couple of spaces
- dumped = '\n '.join(dumped.split('\n')).rstrip()
- return dumped
-
- def _serialize_diff(self, diff):
- return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
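MyDumper's core trick, forcing literal block style for multi-line strings, also works with stock PyYAML; a minimal sketch without the Ansible dumper:

import yaml

class BlockDumper(yaml.SafeDumper):
    def represent_scalar(self, tag, value, style=None):
        if style is None and isinstance(value, str) and '\n' in value:
            style = '|'             # literal block style for multi-line strings
            value = value.rstrip()  # trailing whitespace would break the block
        return super(BlockDumper, self).represent_scalar(tag, value, style=style)

print(yaml.dump({'stdout': 'line one\nline two'},
                Dumper=BlockDumper, default_flow_style=False))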
diff --git a/ansible_collections/community/general/plugins/connection/lxd.py b/ansible_collections/community/general/plugins/connection/lxd.py
deleted file mode 100644
index f3b06e6e..00000000
--- a/ansible_collections/community/general/plugins/connection/lxd.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2016 Matt Clay
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Matt Clay (@mattclay)
- name: lxd
- short_description: Run tasks in lxc containers via lxc CLI
- description:
- - Run commands or put/fetch files to an existing lxc container using lxc CLI
- options:
- remote_addr:
- description:
- - Container identifier.
- default: inventory_hostname
- vars:
- - name: ansible_host
- - name: ansible_lxd_host
- executable:
- description:
- - Shell to use for execution inside the container.
- default: /bin/sh
- vars:
- - name: ansible_executable
- - name: ansible_lxd_executable
- remote:
- description:
- - Name of the LXD remote to use.
- default: local
- vars:
- - name: ansible_lxd_remote
- version_added: 2.0.0
- project:
- description:
- - Name of the LXD project to use.
- vars:
- - name: ansible_lxd_project
- version_added: 2.0.0
-'''
-
-import os
-from subprocess import Popen, PIPE
-
-from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
-from ansible.module_utils.common.process import get_bin_path
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.plugins.connection import ConnectionBase
-
-
-class Connection(ConnectionBase):
- """ lxd based connections """
-
- transport = 'community.general.lxd'
- has_pipelining = True
- default_user = 'root'
-
- def __init__(self, play_context, new_stdin, *args, **kwargs):
- super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
-
- self._host = self._play_context.remote_addr
- try:
- self._lxc_cmd = get_bin_path("lxc")
- except ValueError:
- raise AnsibleError("lxc command not found in PATH")
-
- if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
- self._display.warning('lxd does not support remote_user, using container default: root')
-
- def _connect(self):
- """connect to lxd (nothing to do here) """
- super(Connection, self)._connect()
-
- if not self._connected:
- self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
- self._connected = True
-
- def exec_command(self, cmd, in_data=None, sudoable=True):
- """ execute a command on the lxd host """
- super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
-
- self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
-
- local_cmd = [self._lxc_cmd]
- if self.get_option("project"):
- local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "exec",
- "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
- "--",
- self.get_option("executable"), "-c", cmd
- ])
-
- local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
- in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
-
- process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- stdout, stderr = process.communicate(in_data)
-
- stdout = to_text(stdout)
- stderr = to_text(stderr)
-
- if stderr == "error: Container is not running.\n":
- raise AnsibleConnectionFailure("container not running: %s" % self._host)
-
- if stderr == "error: not found\n":
- raise AnsibleConnectionFailure("container not found: %s" % self._host)
-
- return process.returncode, stdout, stderr
-
- def put_file(self, in_path, out_path):
- """ put a file from local to lxd """
- super(Connection, self).put_file(in_path, out_path)
-
- self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
-
- if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
- raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
-
- local_cmd = [self._lxc_cmd]
- if self.get_option("project"):
- local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "file", "push",
- in_path,
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
- ])
-
- local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
-
- process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- process.communicate()
-
- def fetch_file(self, in_path, out_path):
- """ fetch a file from lxd to local """
- super(Connection, self).fetch_file(in_path, out_path)
-
- self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
-
- local_cmd = [self._lxc_cmd]
- if self.get_option("project"):
- local_cmd.extend(["--project", self.get_option("project")])
- local_cmd.extend([
- "file", "pull",
- "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
- out_path
- ])
-
- local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
-
- process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
- process.communicate()
-
- def close(self):
- """ close the connection (nothing to do here) """
- super(Connection, self).close()
-
- self._connected = False
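For reference, a minimal inventory sketch for this connection plugin, using the vars documented above (container name, remote, and project are placeholders):

```yaml
# Hypothetical inventory entry for an LXD-managed container.
all:
  hosts:
    mycontainer:
      ansible_connection: community.general.lxd
      ansible_lxd_remote: local      # LXD remote (defaults to 'local')
      ansible_lxd_project: default   # optional LXD project
```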
diff --git a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
deleted file mode 100644
index f9c9640b..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Alicloud only documentation fragment
- DOCUMENTATION = r'''
-options:
- alicloud_access_key:
- description:
- - Alibaba Cloud access key. If not set, the value of the environment variable C(ALICLOUD_ACCESS_KEY)
- or C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
- aliases: ['access_key_id', 'access_key']
- type: str
- alicloud_secret_key:
- description:
- - Alibaba Cloud secret key. If not set, the value of the environment variable C(ALICLOUD_SECRET_KEY)
- or C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
- aliases: ['secret_access_key', 'secret_key']
- type: str
- alicloud_region:
- description:
- - The Alibaba Cloud region to use. If not specified, the value of the environment variable
- C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) will be used instead.
- aliases: ['region', 'region_id']
- required: true
- type: str
- alicloud_security_token:
- description:
- - The Alibaba Cloud security token. If not specified then the value of environment variable
- C(ALICLOUD_SECURITY_TOKEN) will be used instead.
- aliases: ['security_token']
- type: str
- alicloud_assume_role:
- description:
- - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
- - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
- I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
- type: dict
- aliases: ['assume_role']
- alicloud_assume_role_arn:
- description:
- - The Alibaba Cloud role_arn. The ARN of the role to assume. If set to an empty string, no role
- switching is performed and Ansible executes with the provided credentials. It supports the
- environment variable ALICLOUD_ASSUME_ROLE_ARN.
- aliases: ['assume_role_arn']
- type: str
- alicloud_assume_role_session_name:
- description:
- - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted,
- 'ansible' is passed to the AssumeRole call as the session name. It supports the environment variable
- ALICLOUD_ASSUME_ROLE_SESSION_NAME.
- aliases: ['assume_role_session_name']
- type: str
- alicloud_assume_role_session_expiration:
- description:
- - The Alibaba Cloud session_expiration. The time after which the established session for assuming
- a role expires. Valid value range: 900-3600 seconds. Defaults to 3600 (in this case Alicloud uses its
- own default value). It supports the environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION.
- aliases: ['assume_role_session_expiration']
- type: int
- ecs_role_name:
- description:
- - The RAM Role Name attached to an ECS instance for API operations. You can retrieve this from the 'Access Control'
- section of the Alibaba Cloud console.
- - If you are running Ansible from an ECS instance that has a RAM role attached, Ansible will access the
- metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS
- credential. This is the preferred approach when running in ECS, as it avoids hard-coding
- credentials; instead they are leased on the fly by Ansible, which reduces the chance of leakage.
- aliases: ['role_name']
- type: str
- profile:
- description:
- - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
- ALICLOUD_PROFILE environment variable.
- type: str
- shared_credentials_file:
- description:
- - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
- environment variable.
- - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
- type: str
-author:
- - "He Guimin (@xiaozhu36)"
-requirements:
- - "python >= 3.6"
-notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence:
- C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
- C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
- C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
- C(ALICLOUD_SECURITY_TOKEN),
- C(ALICLOUD_ECS_ROLE_NAME),
- C(ALICLOUD_SHARED_CREDENTIALS_FILE),
- C(ALICLOUD_PROFILE),
- C(ALICLOUD_ASSUME_ROLE_ARN),
- C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
- C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION),
- - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the
- Alicloud region, when required, but this can also be configured in the footmark config file.
-'''
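A hedged sketch of how a module consuming this fragment is typically invoked; C(ali_instance_info) is one such module, and all values are placeholders (credentials would normally come from the environment variables listed above):

```yaml
- name: Gather Alibaba Cloud instance info (illustrative)
  community.general.ali_instance_info:
    alicloud_access_key: "{{ lookup('env', 'ALICLOUD_ACCESS_KEY') }}"
    alicloud_secret_key: "{{ lookup('env', 'ALICLOUD_SECRET_KEY') }}"
    alicloud_region: cn-beijing
```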
diff --git a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
deleted file mode 100644
index 28489356..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Evgeniy Krysanov
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard documentation fragment
- DOCUMENTATION = r'''
-options:
- client_id:
- description:
- - The OAuth consumer key.
- - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
- type: str
- client_secret:
- description:
- - The OAuth consumer secret.
- - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
- type: str
- user:
- description:
- - The username.
- - If not set the environment variable C(BITBUCKET_USERNAME) will be used.
- type: str
- version_added: 4.0.0
- password:
- description:
- - The App password.
- - If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
- type: str
- version_added: 4.0.0
-notes:
- - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
- - Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
- - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
-'''
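A sketch of the OAuth style described above, using the C(bitbucket_pipeline_variable) module with placeholder workspace, repository, and variable values:

```yaml
- name: Set a pipeline variable via OAuth (illustrative)
  community.general.bitbucket_pipeline_variable:
    client_id: "{{ lookup('env', 'BITBUCKET_CLIENT_ID') }}"
    client_secret: "{{ lookup('env', 'BITBUCKET_CLIENT_SECRET') }}"
    repository: my-repo        # placeholder
    username: my-workspace     # placeholder
    name: DEPLOY_ENV
    value: production
    state: present
```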
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
deleted file mode 100644
index 02435e25..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Dimension Data
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-# Authors:
-# - Adam Friedman
-
-
-class ModuleDocFragment(object):
-
- # Dimension Data doc fragment
- DOCUMENTATION = r'''
-
-options:
- region:
- description:
- - The target region.
- - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]
- - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html)
- - Note that the default value "na" stands for "North America".
- - The module prepends 'dd-' to the region choice.
- type: str
- default: na
- mcp_user:
- description:
- - The username used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
- type: str
- mcp_password:
- description:
- - The password used to authenticate to the CloudControl API.
- - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
- - Required if I(mcp_user) is specified.
- type: str
- location:
- description:
- - The target datacenter.
- type: str
- required: true
- validate_certs:
- description:
- - If C(false), SSL certificates will not be validated.
- - This should only be used on private instances of the CloudControl API that use self-signed certificates.
- type: bool
- default: yes
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
deleted file mode 100644
index ac3deab1..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Dimension Data
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-# Authors:
-# - Adam Friedman
-
-
-class ModuleDocFragment(object):
-
- # Dimension Data ("wait-for-completion" parameters) doc fragment
- DOCUMENTATION = r'''
-
-options:
- wait:
- description:
- - Should we wait for the task to complete before moving on to the next.
- type: bool
- default: no
- wait_time:
- description:
- - The maximum amount of time (in seconds) to wait for the task to complete.
- - Only applicable if I(wait=true).
- type: int
- default: 600
- wait_poll_interval:
- description:
- - The amount of time (in seconds) to wait between checks for task completion.
- - Only applicable if I(wait=true).
- type: int
- default: 2
- '''
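Modules such as C(dimensiondata_network) combine this fragment with the base Dimension Data fragment above; a sketch with placeholder values:

```yaml
- name: Create a network and wait for completion (illustrative)
  community.general.dimensiondata_network:
    region: na
    location: NA9          # placeholder datacenter
    name: my-network
    wait: true
    wait_time: 600
```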
diff --git a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
deleted file mode 100644
index 21e4584f..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-requirements:
- - requests (Python library U(https://pypi.org/project/requests/))
-
-options:
- api_token:
- description:
- - GitLab access token with API permissions.
- type: str
- api_oauth_token:
- description:
- - GitLab OAuth token for logging in.
- type: str
- version_added: 4.2.0
- api_job_token:
- description:
- - GitLab CI job token for logging in.
- type: str
- version_added: 4.2.0
-'''
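A sketch of token authentication with one of the GitLab modules (server URL and token are placeholders; C(api_url) is documented on the individual modules rather than in this fragment):

```yaml
- name: Ensure a GitLab project exists (illustrative)
  community.general.gitlab_project:
    api_url: https://gitlab.example.com
    api_token: "{{ gitlab_token }}"
    name: my-project
    state: present
```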
diff --git a/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py b/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
deleted file mode 100644
index ad445205..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # HPE 3PAR doc fragment
- DOCUMENTATION = '''
-options:
- storage_system_ip:
- description:
- - The storage system IP address.
- type: str
- required: true
- storage_system_password:
- description:
- - The storage system password.
- type: str
- required: true
- storage_system_username:
- description:
- - The storage system user name.
- type: str
- required: true
-
-requirements:
- - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
- - WSAPI service should be enabled on the 3PAR storage array.
-notes:
- - C(check_mode) is not supported.
- '''
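A sketch with the C(ss_3par_cpg) module, which consumes this fragment (IP address and credentials are placeholders):

```yaml
- name: Ensure a CPG exists on the 3PAR array (illustrative)
  community.general.ss_3par_cpg:
    storage_system_ip: 192.0.2.10
    storage_system_username: admin
    storage_system_password: "{{ threepar_password }}"
    cpg_name: sample_cpg
    state: present
```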
diff --git a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
deleted file mode 100644
index a31c84cb..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# Copyright: (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Parameters for influxdb modules
- DOCUMENTATION = r'''
-options:
- hostname:
- description:
- - The hostname or IP address on which InfluxDB server is listening.
- - Since Ansible 2.5, defaults to localhost.
- type: str
- default: localhost
- username:
- description:
- - Username that will be used to authenticate against InfluxDB server.
- - Alias C(login_username) added in Ansible 2.5.
- type: str
- default: root
- aliases: [ login_username ]
- password:
- description:
- - Password that will be used to authenticate against InfluxDB server.
- - Alias C(login_password) added in Ansible 2.5.
- type: str
- default: root
- aliases: [ login_password ]
- port:
- description:
- - The port on which InfluxDB server is listening
- type: int
- default: 8086
- path:
- description:
- - The path on which InfluxDB server is accessible
- - Only available when using python-influxdb >= 5.1.0
- type: str
- version_added: '0.2.0'
- validate_certs:
- description:
- - If set to C(no), the SSL certificates will not be validated.
- - This should only be set to C(no) on personally controlled sites using self-signed certificates.
- type: bool
- default: yes
- ssl:
- description:
- - Use https instead of http to connect to InfluxDB server.
- type: bool
- default: false
- timeout:
- description:
- - Number of seconds Requests will wait for client to establish a connection.
- type: int
- retries:
- description:
- - Number of retries client will try before aborting.
- - C(0) indicates try until success.
- - Only available when using python-influxdb >= 4.1.0
- type: int
- default: 3
- use_udp:
- description:
- - Use UDP to connect to InfluxDB server.
- type: bool
- default: false
- udp_port:
- description:
- - UDP port to connect to InfluxDB server.
- type: int
- default: 4444
- proxies:
- description:
- - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
- type: dict
-'''
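A sketch of these connection options in use, assuming the C(influxdb_database) module and placeholder credentials:

```yaml
- name: Ensure an InfluxDB database exists (illustrative)
  community.general.influxdb_database:
    hostname: influx.example.com
    port: 8086
    username: admin
    password: "{{ influxdb_password }}"
    database_name: metrics
```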
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/ansible_collections/community/general/plugins/doc_fragments/ipa.py
deleted file mode 100644
index 47bcee60..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/ipa.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017-18, Ansible Project
-# Copyright: (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Parameters for FreeIPA/IPA modules
- DOCUMENTATION = r'''
-options:
- ipa_port:
- description:
- - Port of FreeIPA / IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
- - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then the default value is used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
- type: int
- default: 443
- ipa_host:
- description:
- - IP or hostname of IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
- - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
- - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
- - If neither the DNS entry, nor the environment variable C(IPA_HOST), nor the value is available in the task, then the default value will be used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
- type: str
- default: ipa.example.com
- ipa_user:
- description:
- - Administrative account used on IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
- - If both the environment variable C(IPA_USER) and the value are not specified in the task, then the default value is used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
- type: str
- default: admin
- ipa_pass:
- description:
- - Password of administrative user.
- - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
- - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
- - If the environment variable C(KRB5CCNAME) is available, the module will use this Kerberos credentials cache to authenticate to the FreeIPA server.
- - If the environment variable C(KRB5_CLIENT_KTNAME) is available and C(KRB5CCNAME) is not, the module will use this Kerberos keytab to authenticate.
- - If GSSAPI is not available, the usage of 'ipa_pass' is required.
- - Environment variable fallback mechanism is added in Ansible 2.5.
- type: str
- ipa_prot:
- description:
- - Protocol used by IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
- - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then the default value is used.
- - Environment variable fallback mechanism is added in Ansible 2.5.
- type: str
- choices: [ http, https ]
- default: https
- validate_certs:
- description:
- - This only applies if C(ipa_prot) is I(https).
- - If set to C(no), the SSL certificates will not be validated.
- - This should only be set to C(no) on personally controlled sites using self-signed certificates.
- type: bool
- default: yes
- ipa_timeout:
- description:
- - Specifies idle timeout (in seconds) for the connection.
- - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
- - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
- - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then the default value is used.
- type: int
- default: 10
-'''
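A sketch of the connection options in practice, using C(ipa_user) with placeholder values:

```yaml
- name: Ensure a FreeIPA user exists (illustrative)
  community.general.ipa_user:
    name: jsmith
    state: present
    givenname: John
    sn: Smith
    ipa_host: ipa.example.com
    ipa_user: admin
    ipa_pass: "{{ ipa_admin_password }}"
```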
diff --git a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
deleted file mode 100644
index fab9a6e8..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Eike Frost
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard documentation fragment
- DOCUMENTATION = r'''
-options:
- auth_keycloak_url:
- description:
- - URL to the Keycloak instance.
- type: str
- required: true
- aliases:
- - url
-
- auth_client_id:
- description:
- - OpenID Connect I(client_id) to authenticate to the API with.
- type: str
- default: admin-cli
-
- auth_realm:
- description:
- - Keycloak realm name to authenticate to for API access.
- type: str
-
- auth_client_secret:
- description:
- - Client Secret to use in conjunction with I(auth_client_id) (if required).
- type: str
-
- auth_username:
- description:
- - Username to authenticate for API access with.
- type: str
- aliases:
- - username
-
- auth_password:
- description:
- - Password to authenticate for API access with.
- type: str
- aliases:
- - password
-
- token:
- description:
- - Authentication token for Keycloak API.
- type: str
- version_added: 3.0.0
-
- validate_certs:
- description:
- - Verify TLS certificates (do not disable this in production).
- type: bool
- default: yes
-
- connection_timeout:
- description:
- - Controls the HTTP connections timeout period (in seconds) to Keycloak API.
- type: int
- default: 10
- version_added: 4.5.0
-'''
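A sketch of admin-user authentication against the Keycloak API, for example with C(keycloak_client) (URL, realm, and credentials are placeholders):

```yaml
- name: Manage a Keycloak client (illustrative)
  community.general.keycloak_client:
    auth_keycloak_url: https://keycloak.example.com/auth
    auth_realm: master
    auth_username: admin
    auth_password: "{{ keycloak_admin_password }}"
    client_id: my-client
    state: present
```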
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/ansible_collections/community/general/plugins/doc_fragments/ldap.py
deleted file mode 100644
index 1c9931fb..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/ldap.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Peter Sagerson
-# Copyright: (c) 2016, Jiri Tyr
-# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Standard LDAP documentation fragment
- DOCUMENTATION = r'''
-options:
- bind_dn:
- description:
- - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
- - If this is blank, we'll use an anonymous bind.
- type: str
- bind_pw:
- description:
- - The password to use with I(bind_dn).
- type: str
- dn:
- required: true
- description:
- - The DN of the entry to add or remove.
- type: str
- referrals_chasing:
- choices: [disabled, anonymous]
- default: anonymous
- type: str
- description:
- - Set the referrals chasing behavior.
- - C(anonymous): follow referrals anonymously. This is the default behavior.
- - C(disabled): disable referrals chasing. This sets C(OPT_REFERRALS) to off.
- version_added: 2.0.0
- server_uri:
- description:
- - A URI to the LDAP server.
- - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
- type: str
- default: ldapi:///
- start_tls:
- description:
- - If true, we'll use the START_TLS LDAP extension.
- type: bool
- default: no
- validate_certs:
- description:
- - If set to C(no), SSL certificates will not be validated.
- - This should only be used on sites using self-signed certificates.
- type: bool
- default: yes
- sasl_class:
- description:
- - The class to use for SASL authentication.
- - Possible choices are C(external) and C(gssapi).
- type: str
- choices: ['external', 'gssapi']
- default: external
- version_added: "2.0.0"
-'''
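A sketch combining these options with C(ldap_entry) (DN, server, and credentials are placeholders):

```yaml
- name: Ensure an LDAP entry exists (illustrative)
  community.general.ldap_entry:
    dn: ou=users,dc=example,dc=com
    objectClass: organizationalUnit
    server_uri: ldap://ldap.example.com
    bind_dn: cn=admin,dc=example,dc=com
    bind_pw: "{{ ldap_admin_password }}"
    start_tls: true
```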
diff --git a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
deleted file mode 100644
index b610b512..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Daniel Korn
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard ManageIQ documentation fragment
- DOCUMENTATION = r'''
-options:
- manageiq_connection:
- description:
- - ManageIQ connection configuration information.
- required: false
- type: dict
- suboptions:
- url:
- description:
- - ManageIQ environment URL. Defaults to the C(MIQ_URL) environment variable if set; otherwise it must be provided.
- type: str
- required: false
- username:
- description:
- - ManageIQ username. Defaults to the C(MIQ_USERNAME) environment variable if set; otherwise required if no token is passed in.
- type: str
- password:
- description:
- - ManageIQ password. Defaults to the C(MIQ_PASSWORD) environment variable if set; otherwise required if no token is passed in.
- type: str
- token:
- description:
- - ManageIQ token. Defaults to the C(MIQ_TOKEN) environment variable if set; otherwise required if no username or password is passed in.
- type: str
- validate_certs:
- description:
- - Whether SSL certificates should be verified for HTTPS requests. Defaults to C(yes).
- type: bool
- default: yes
- aliases: [ verify_ssl ]
- ca_cert:
- description:
- - The path to a CA bundle file or directory with certificates. Defaults to C(None).
- type: str
- aliases: [ ca_bundle_path ]
-
-requirements:
- - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
-'''
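A sketch of the nested I(manageiq_connection) dictionary, as consumed by modules such as C(manageiq_user) (all values are placeholders):

```yaml
- name: Ensure a ManageIQ user exists (illustrative)
  community.general.manageiq_user:
    userid: jdoe
    name: Jane Doe
    password: "{{ miq_user_password }}"
    manageiq_connection:
      url: https://manageiq.example.com
      username: admin
      password: "{{ miq_admin_password }}"
      validate_certs: false
```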
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/ansible_collections/community/general/plugins/doc_fragments/oneview.py
deleted file mode 100644
index 0d385e99..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/oneview.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # OneView doc fragment
- DOCUMENTATION = r'''
-options:
- config:
- description:
- - Path to a .json configuration file containing the OneView client configuration.
- The configuration file is optional and, when used, should be present on the host running the Ansible commands.
- If the file path is not provided, the configuration will be loaded from environment variables.
- For links to example configuration files and how to use the environment variables, see the notes section.
- type: path
- api_version:
- description:
- - OneView API Version.
- type: int
- image_streamer_hostname:
- description:
- - IP address or hostname for the HPE Image Streamer REST API.
- type: str
- hostname:
- description:
- - IP address or hostname for the appliance.
- type: str
- username:
- description:
- - Username for API authentication.
- type: str
- password:
- description:
- - Password for API authentication.
- type: str
-
-requirements:
- - python >= 2.7.9
-
-notes:
- - "A sample configuration file for the config parameter can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
- - "Check how to use environment variables for configuration at:
- U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
- - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
- - "The OneView API version used will directly affect returned and expected fields in resources.
- Information on setting the desired API version can be found at:
- U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
- '''
-
- VALIDATEETAG = r'''
-options:
- validate_etag:
- description:
- - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
- for the resource matches the ETag provided in the data.
- type: bool
- default: yes
-'''
-
- FACTSPARAMS = r'''
-options:
- params:
- description:
- - List of params to delimit, filter and sort the list of resources.
- - "params allowed:
- - C(start): The first item to return, using 0-based indexing.
- - C(count): The number of resources to return.
- - C(filter): A general filter/query string to narrow the list of items returned.
- - C(sort): The sort order of the returned data set."
- type: dict
-'''
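A sketch of configuration-file based authentication with a OneView module such as C(oneview_fc_network) (paths and data are placeholders):

```yaml
- name: Ensure an FC network exists (illustrative)
  community.general.oneview_fc_network:
    config: /etc/oneview/oneview_config.json
    state: present
    data:
      name: my-fc-network
```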
diff --git a/ansible_collections/community/general/plugins/doc_fragments/online.py b/ansible_collections/community/general/plugins/doc_fragments/online.py
deleted file mode 100644
index 4ad35bab..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/online.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard documentation fragment
- DOCUMENTATION = r'''
-options:
- api_token:
- description:
- - Online OAuth token.
- type: str
- required: true
- aliases: [ oauth_token ]
- api_url:
- description:
- - Online API URL
- type: str
- default: 'https://api.online.net'
- aliases: [ base_url ]
- api_timeout:
- description:
- - HTTP timeout to Online API in seconds.
- type: int
- default: 30
- aliases: [ timeout ]
- validate_certs:
- description:
- - Validate SSL certs of the Online API.
- type: bool
- default: yes
-notes:
- - Also see the API documentation on U(https://console.online.net/en/api/)
- - If C(api_token) is not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
- environment variable.
-'''
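A sketch of the shared options with an info module (the token is a placeholder and would normally come from one of the environment variables above):

```yaml
- name: Gather Online server information (illustrative)
  community.general.online_server_info:
    api_token: "{{ lookup('env', 'ONLINE_TOKEN') }}"
```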
diff --git a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
deleted file mode 100644
index 08b614a6..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, www.privaz.io Valletech AB
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # OpenNebula common documentation
- DOCUMENTATION = r'''
-options:
- api_url:
- description:
- - The ENDPOINT URL of the XMLRPC server.
- - If not specified then the value of the ONE_URL environment variable, if any, is used.
- type: str
- aliases:
- - api_endpoint
- api_username:
- description:
- - The name of the user for XMLRPC authentication.
- - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
- type: str
- api_password:
- description:
- - The password or token for XMLRPC authentication.
- - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
- type: str
- aliases:
- - api_token
- validate_certs:
- description:
- - Whether to validate the SSL certificates or not.
- - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used.
- type: bool
- default: yes
- wait_timeout:
- description:
- - Time to wait for the desired state to be reached before timeout, in seconds.
- type: int
- default: 300
-'''
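A sketch of XMLRPC authentication via these options, using C(one_vm) with a hypothetical template ID:

```yaml
- name: Deploy a VM from a template (illustrative)
  community.general.one_vm:
    api_url: http://one.example.com:2633/RPC2
    api_username: oneadmin
    api_password: "{{ one_password }}"
    template_id: 6   # placeholder template
```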
diff --git a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
deleted file mode 100644
index 7ab7c155..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Peter Sprygada
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-options:
- host:
- description:
- - Specifies the DNS host name or address for connecting to the remote
- device over the specified transport. The value of host is used as
- the destination address for the transport. Note this argument
- does not affect the SSH transport.
- type: str
- port:
- description:
- - Specifies the port to use when building the connection to the remote
- device. This value applies to either I(cli) or I(rest). The port
- value will default to the appropriate transport common port if
- none is provided in the task. (cli=22, http=80, https=443). Note
- this argument does not affect the SSH transport.
- type: int
- default: 0 (use common port)
- username:
- description:
- - Configures the username to use to authenticate the connection to
- the remote device. This value is used to authenticate
- either the CLI login or the eAPI authentication depending on which
- transport is used. Note this argument does not affect the SSH
- transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
- type: str
- password:
- description:
- - Specifies the password to use to authenticate the connection to
- the remote device. This is a common argument used for either I(cli)
- or I(rest) transports. Note this argument does not affect the SSH
- transport. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
- type: str
- timeout:
- description:
- - Specifies the timeout in seconds for communicating with the network device
- for either connecting or sending commands. If the timeout is
- exceeded before the operation is completed, the module will error.
- type: int
- default: 10
- ssh_keyfile:
- description:
- - Specifies the SSH key to use to authenticate the connection to
- the remote device. This argument is only used for the I(cli)
- transports. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
- type: path
- transport:
- description:
- - Configures the transport connection to use when connecting to the
- remote device. The transport argument supports connectivity to the
- device over ssh, cli or REST.
- required: true
- type: str
- choices: [ cli, rest, ssh ]
- default: ssh
- use_ssl:
- description:
- - If set to C(yes), configures the I(transport) to use SSL, but only when the
- I(transport) argument is configured as rest. If the transport
- argument is not I(rest), this value is ignored.
- type: bool
- default: yes
- provider:
- description:
- - Convenience method that allows all I(openswitch) arguments to be passed as
- a dict object. All constraints (required, choices, etc) must be
- met either by individual arguments or values in this dict.
- type: dict
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/ansible_collections/community/general/plugins/doc_fragments/oracle.py
deleted file mode 100644
index 94999c04..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- DOCUMENTATION = """
- requirements:
- - "python >= 2.7"
- - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
- notes:
- - For OCI python sdk configuration, please refer to
- U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
- options:
- config_file_location:
- description:
- - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
- if any, is used. Otherwise, defaults to ~/.oci/config.
- type: str
- config_profile_name:
- description:
- - The profile to load from the config file referenced by C(config_file_location). If not set, then the
- value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
- "DEFAULT" profile in C(config_file_location).
- default: "DEFAULT"
- type: str
- api_user:
- description:
- - The OCID of the user on whose behalf OCI APIs are invoked. If not set, then the
- value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
- is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
- please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
- type: str
- api_user_fingerprint:
- description:
- - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
- environment variable, if any, is used. This option is required if the key fingerprint is not
- specified through a configuration file (See C(config_file_location)). To get the key pair's
- fingerprint value, please refer to
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
- type: str
- api_user_key_file:
- description:
- - Full path and filename of the private key (in PEM format). If not set, then the value of the
- OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
- not specified through a configuration file (See C(config_file_location)). If the key is encrypted
- with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
- type: path
- api_user_key_pass_phrase:
- description:
- - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
- the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
- key passphrase is not specified through a configuration file (See C(config_file_location)).
- type: str
- auth_type:
- description:
- - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
- authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
- used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE environment variable,
- if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication
- when running ansible playbooks within an OCI compute instance.
- choices: ['api_key', 'instance_principal']
- default: 'api_key'
- type: str
- tenancy:
- description:
- - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
- used. This option is required if the tenancy OCID is not specified through a configuration file
- (See C(config_file_location)). To get the tenancy OCID, please refer to
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
- type: str
- region:
- description:
- - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
- value of the OCI_REGION variable, if any, is used. This option is required if the region is
- not specified through a configuration file (See C(config_file_location)). Please refer to
- U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
- on OCI regions.
- type: str
- """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
deleted file mode 100644
index ff70d45d..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- display_name:
- description: Use I(display_name) along with the other options to return only resources that match the given
- display name exactly.
- type: str
- """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
deleted file mode 100644
index 8c4f9c1e..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- name:
- description: Use I(name) along with the other options to return only resources that match the given name
- exactly.
- type: str
- """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
deleted file mode 100644
index 0312755f..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- DOCUMENTATION = """
- options:
- wait:
- description: Whether to wait for the create or delete operation to complete.
- default: yes
- type: bool
- wait_timeout:
- description: Time, in seconds, to wait when I(wait=yes).
- default: 1200
- type: int
- wait_until:
- description: The lifecycle state to wait for the resource to transition into when I(wait=yes). By default,
- when I(wait=yes), we wait for the resource to reach the applicable ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/
- RUNNING lifecycle state during a create operation, and the DELETED/DETACHED/
- TERMINATED lifecycle state during a delete operation.
- type: str
- """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
deleted file mode 100644
index 165a7852..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for Proxmox VE modules
- DOCUMENTATION = r'''
-options:
- api_host:
- description:
- - Specify the target host of the Proxmox VE cluster.
- type: str
- required: true
- api_user:
- description:
- - Specify the user to authenticate with.
- type: str
- required: true
- api_password:
- description:
- - Specify the password to authenticate with.
- - You can use C(PROXMOX_PASSWORD) environment variable.
- type: str
- api_token_id:
- description:
- - Specify the token ID.
- type: str
- version_added: 1.3.0
- api_token_secret:
- description:
- - Specify the token secret.
- type: str
- version_added: 1.3.0
- validate_certs:
- description:
- - If C(no), SSL certificates will not be validated.
- - This should only be used on personally controlled sites using self-signed certificates.
- type: bool
- default: no
-requirements: [ "proxmoxer", "requests" ]
-'''
-
- SELECTION = r'''
-options:
- vmid:
- description:
- - Specifies the instance ID.
- - If not set the next available ID will be fetched from ProxmoxAPI.
- type: int
- node:
- description:
- - Proxmox VE node on which to operate.
- - Only required for I(state=present).
- - For all other states it will be auto-discovered.
- type: str
- pool:
- description:
- - Add the new VM to the specified pool.
- type: str
-'''
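A sketch combining the common and SELECTION options, for example with C(proxmox_kvm) (host, node, and credentials are placeholders):

```yaml
- name: Create a VM on Proxmox VE (illustrative)
  community.general.proxmox_kvm:
    api_host: pve.example.com
    api_user: root@pam
    api_password: "{{ proxmox_password }}"
    node: pve01
    name: test-vm
    state: present
```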
diff --git a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
deleted file mode 100644
index f35f0267..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Simon Dodsley
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard Pure Storage documentation fragment
- DOCUMENTATION = r'''
-options:
- - See separate platform section for more details
-requirements:
- - See separate platform section for more details
-notes:
- - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
-'''
-
- # Documentation fragment for FlashBlade
- FB = r'''
-options:
- fb_url:
- description:
- - FlashBlade management IP address or Hostname.
- type: str
- api_token:
- description:
- - FlashBlade API token for admin privileged user.
- type: str
-notes:
- - This module requires the C(purity_fb) Python library
- - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
- if I(fb_url) and I(api_token) arguments are not passed to the module directly
-requirements:
- - python >= 2.7
- - purity_fb >= 1.1
-'''
-
- # Documentation fragment for FlashArray
- FA = r'''
-options:
- fa_url:
- description:
- - FlashArray management IPv4 address or Hostname.
- type: str
- required: true
- api_token:
- description:
- - FlashArray API token for admin privileged user.
- type: str
- required: true
-notes:
- - This module requires the C(purestorage) Python library
- - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
- if I(fa_url) and I(api_token) arguments are not passed to the module directly
-requirements:
- - python >= 2.7
- - purestorage
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
deleted file mode 100644
index 0f57dd88..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Matt Martz
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard Rackspace only documentation fragment
- DOCUMENTATION = r'''
-options:
- api_key:
- description:
- - Rackspace API key, overrides I(credentials).
- type: str
- aliases: [ password ]
- credentials:
- description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
- type: path
- aliases: [ creds_file ]
- env:
- description:
- - Environment as configured in I(~/.pyrax.cfg),
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
- type: str
- region:
- description:
- - Region to create an instance in.
- type: str
- username:
- description:
- - Rackspace username, overrides I(credentials).
- type: str
- validate_certs:
- description:
- - Whether or not to require SSL validation of API endpoints.
- type: bool
- aliases: [ verify_ssl ]
-requirements:
- - python >= 2.6
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
-
- # Documentation fragment including attributes to enable communication
- # of other OpenStack clouds. Not all rax modules support this.
- OPENSTACK = r'''
-options:
- api_key:
- type: str
- description:
- - Rackspace API key, overrides I(credentials).
- aliases: [ password ]
- auth_endpoint:
- type: str
- description:
- - The URI of the authentication service.
- - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/)
- credentials:
- type: path
- description:
- - File to find the Rackspace credentials in. Ignored if I(api_key) and
- I(username) are provided.
- aliases: [ creds_file ]
- env:
- type: str
- description:
- - Environment as configured in I(~/.pyrax.cfg),
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
- identity_type:
- type: str
- description:
- - Authentication mechanism to use, such as rackspace or keystone.
- default: rackspace
- region:
- type: str
- description:
- - Region to create an instance in.
- tenant_id:
- type: str
- description:
- - The tenant ID used for authentication.
- tenant_name:
- type: str
- description:
- - The tenant name used for authentication.
- username:
- type: str
- description:
- - Rackspace username, overrides I(credentials).
- validate_certs:
- description:
- - Whether or not to require SSL validation of API endpoints.
- type: bool
- aliases: [ verify_ssl ]
-requirements:
- - python >= 2.6
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-'''
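A sketch of credential handling with the classic C(rax) module (names, image, and flavor are placeholders; rax modules typically run from the control node, hence C(delegate_to: localhost)):

```yaml
- name: Build a Cloud Server (illustrative)
  community.general.rax:
    username: my-rax-user
    api_key: "{{ rax_api_key }}"
    region: DFW
    name: rax-test1
    image: ubuntu-placeholder   # image slug or UUID
    flavor: general1-1
    count: 1
    state: present
  delegate_to: localhost
```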
diff --git a/ansible_collections/community/general/plugins/doc_fragments/redis.py b/ansible_collections/community/general/plugins/doc_fragments/redis.py
deleted file mode 100644
index e7af25ec..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/redis.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for Redis modules
- DOCUMENTATION = r'''
-options:
- login_host:
- description:
- - Specify the target host running the database.
- default: localhost
- type: str
- login_port:
- description:
- - Specify the port to connect to.
- default: 6379
- type: int
- login_user:
- description:
- - Specify the user to authenticate with.
- - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
- type: str
- login_password:
- description:
- - Specify the password to authenticate with.
- - Usually not used when target is localhost.
- type: str
- tls:
- description:
- - Specify whether or not to use TLS for the connection.
- type: bool
- default: true
- validate_certs:
- description:
- - Specify whether or not to validate TLS certificates.
- - This should only be turned off for personally controlled sites or with
- C(localhost) as target.
- type: bool
- default: true
- ca_certs:
- description:
- - Path to the root certificates file. If not set and I(tls) is
- set to C(true), the certifi CA certificates will be used.
- type: str
-requirements: [ "redis", "certifi" ]
-
-notes:
- - Requires the C(redis) Python package on the remote host. You can
- install it with pip (C(pip install redis)) or with a package manager.
- Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
-'''
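A sketch of these connection options with one of the Redis modules (host, password, and key are placeholders):

```yaml
- name: Read a key from Redis (illustrative)
  community.general.redis_data_info:
    login_host: redis.example.com
    login_password: "{{ redis_password }}"
    key: mykey
```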
diff --git a/ansible_collections/community/general/plugins/doc_fragments/rundeck.py b/ansible_collections/community/general/plugins/doc_fragments/rundeck.py
deleted file mode 100644
index 056a54f3..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/rundeck.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Phillipe Smith
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-options:
- url:
- type: str
- description:
- - Rundeck instance URL.
- required: true
- api_version:
- type: int
- description:
- - Rundeck API version to be used.
- - API version must be at least 14.
- default: 39
- api_token:
- type: str
- description:
- - Rundeck User API Token.
- required: true
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
deleted file mode 100644
index c1e1b13d..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard documentation fragment
- DOCUMENTATION = r'''
-options:
- api_token:
- description:
- - Scaleway OAuth token.
- type: str
- required: true
- aliases: [ oauth_token ]
- api_url:
- description:
- - Scaleway API URL.
- type: str
- default: https://api.scaleway.com
- aliases: [ base_url ]
- api_timeout:
- description:
- - HTTP timeout to Scaleway API in seconds.
- type: int
- default: 30
- aliases: [ timeout ]
- query_parameters:
- description:
- - List of parameters passed to the query string.
- type: dict
- default: {}
- validate_certs:
- description:
- - Validate SSL certs of the Scaleway API.
- type: bool
- default: yes
-notes:
- - Also see the API documentation on U(https://developer.scaleway.com/)
- - If C(api_token) is not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
- - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL)
- environment variable.
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/ansible_collections/community/general/plugins/doc_fragments/vexata.py
deleted file mode 100644
index d541d5ad..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/vexata.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2019, Sandeep Kasargod
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- DOCUMENTATION = r'''
-options:
- - See respective platform section for more details
-requirements:
- - See respective platform section for more details
-notes:
- - Ansible modules are available for Vexata VX100 arrays.
-'''
-
- # Documentation fragment for Vexata VX100 series
- VX100 = r'''
-options:
- array:
- description:
- - Vexata VX100 array hostname or IPv4 Address.
- required: true
- type: str
- user:
- description:
- - Vexata API user with administrative privileges.
- required: false
- type: str
- password:
- description:
- - Vexata API user password.
- required: false
- type: str
- validate_certs:
- description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If set to C(yes), please make sure Python >= 2.7.9 is installed on the given machine.
- required: false
- type: bool
- default: 'no'
-
-requirements:
- - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
- - vexatapi >= 0.0.1
- - python >= 2.7
- - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
- user and password arguments are not passed to the module directly.
-'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
deleted file mode 100644
index 747bf02f..00000000
--- a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Bojan Vitnik
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
- # Common parameters for XenServer modules
- DOCUMENTATION = r'''
-options:
- hostname:
- description:
- - The hostname or IP address of the XenServer host or XenServer pool master.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
- type: str
- default: localhost
- aliases: [ host, pool ]
- username:
- description:
- - The username to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
- type: str
- default: root
- aliases: [ admin, user ]
- password:
- description:
- - The password to use for connecting to XenServer.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
- type: str
- aliases: [ pass, pwd ]
- validate_certs:
- description:
- - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
- - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
- type: bool
- default: yes
-'''
diff --git a/ansible_collections/community/general/plugins/filter/counter.py b/ansible_collections/community/general/plugins/filter/counter.py
deleted file mode 100644
index ad957fce..00000000
--- a/ansible_collections/community/general/plugins/filter/counter.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2021, Remy Keil
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common._collections_compat import Sequence
-from collections import Counter
-
-
-def counter(sequence):
- ''' Count elements in a sequence. Returns dict with count result. '''
- if not isinstance(sequence, Sequence):
- raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
- (sequence, type(sequence)))
-
- try:
- result = dict(Counter(sequence))
- except TypeError as e:
- raise AnsibleFilterError(
- "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
- )
- return result
-
-
-class FilterModule(object):
- ''' Ansible counter jinja2 filters '''
-
- def filters(self):
- filters = {
- 'counter': counter,
- }
-
- return filters
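
The deleted filter above is a thin wrapper around collections.Counter; a minimal standalone sketch of what it returns:

    from collections import Counter

    # Mirrors the filter's output for string and list inputs.
    print(dict(Counter("abca")))           # {'a': 2, 'b': 1, 'c': 1}
    print(dict(Counter(["a", "b", "a"])))  # {'a': 2, 'b': 1}
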
diff --git a/ansible_collections/community/general/plugins/filter/dict.py b/ansible_collections/community/general/plugins/filter/dict.py
deleted file mode 100644
index 3d20e752..00000000
--- a/ansible_collections/community/general/plugins/filter/dict.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Felix Fontein
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-def dict_filter(sequence):
- '''Convert a list of tuples to a dictionary.
-
- Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}``
- '''
- return dict(sequence)
-
-
-class FilterModule(object):
- '''Ansible jinja2 filters'''
-
- def filters(self):
- return {
- 'dict': dict_filter,
- }
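
The plain-Python equivalent of the deleted dict filter is just the dict() constructor applied to a sequence of pairs:

    # `[[1, 2], ['a', 'b']] | community.general.dict` boils down to:
    print(dict([[1, 2], ['a', 'b']]))  # {1: 2, 'a': 'b'}
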
diff --git a/ansible_collections/community/general/plugins/filter/dict_kv.py b/ansible_collections/community/general/plugins/filter/dict_kv.py
deleted file mode 100644
index 7ce6c3e4..00000000
--- a/ansible_collections/community/general/plugins/filter/dict_kv.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2020 Stanislav German-Evtushenko (@giner)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-def dict_kv(value, key):
- '''Return a dictionary with a single key-value pair
-
- Example:
-
- - hosts: localhost
- gather_facts: false
- vars:
- myvar: myvalue
- tasks:
- - debug:
- msg: "{{ myvar | dict_kv('thatsmyvar') }}"
-
- produces:
-
- ok: [localhost] => {
- "msg": {
- "thatsmyvar": "myvalue"
- }
- }
-
- Example 2:
-
- - hosts: localhost
- gather_facts: false
- vars:
- common_config:
- type: host
- database: all
- myservers:
- - server1
- - server2
- tasks:
- - debug:
- msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"
-
- produces:
-
- ok: [localhost] => {
- "msg": [
- {
- "database": "all",
- "server": "server1",
- "type": "host"
- },
- {
- "database": "all",
- "server": "server2",
- "type": "host"
- }
- ]
- }
- '''
- return {key: value}
-
-
-class FilterModule(object):
- ''' Query filter '''
-
- def filters(self):
- return {
- 'dict_kv': dict_kv
- }
diff --git a/ansible_collections/community/general/plugins/filter/from_csv.py b/ansible_collections/community/general/plugins/filter/from_csv.py
deleted file mode 100644
index b66d4769..00000000
--- a/ansible_collections/community/general/plugins/filter/from_csv.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso)
-# Copyright: (c) 2018, Dag Wieers (@dagwieers)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
- DialectNotAvailableError,
- CustomDialectFailureError)
-
-
-def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):
-
- dialect_params = {
- "delimiter": delimiter,
- "skipinitialspace": skipinitialspace,
- "strict": strict,
- }
-
- try:
- dialect = initialize_dialect(dialect, **dialect_params)
- except (CustomDialectFailureError, DialectNotAvailableError) as e:
- raise AnsibleFilterError(to_native(e))
-
- reader = read_csv(data, dialect, fieldnames)
-
- data_list = []
-
- try:
- for row in reader:
- data_list.append(row)
- except CSVError as e:
- raise AnsibleFilterError("Unable to process file: %s" % to_native(e))
-
- return data_list
-
-
-class FilterModule(object):
-
- def filters(self):
- return {
- 'from_csv': from_csv
- }
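
The helpers used above (initialize_dialect, read_csv) live in the collection's module_utils and are not part of this diff; a rough standalone approximation of the filter's default behaviour using only the standard csv module:

    import csv
    from io import StringIO

    # 'excel' dialect with the first row as fieldnames -- the filter's defaults.
    data = "name,role\nalice,admin\nbob,user\n"
    rows = list(csv.DictReader(StringIO(data), dialect="excel"))
    print(rows)  # [{'name': 'alice', 'role': 'admin'}, {'name': 'bob', 'role': 'user'}]
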
diff --git a/ansible_collections/community/general/plugins/filter/groupby.py b/ansible_collections/community/general/plugins/filter/groupby.py
deleted file mode 100644
index a2a85aa9..00000000
--- a/ansible_collections/community/general/plugins/filter/groupby.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2021, Felix Fontein
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleFilterError
-from ansible.module_utils.common._collections_compat import Mapping, Sequence
-
-
-def groupby_as_dict(sequence, attribute):
- '''
- Given a sequence of dictionaries and an attribute name, returns a dictionary mapping
- the value of this attribute to the dictionary.
-
- If multiple dictionaries in the sequence have the same value for this attribute,
- the filter will fail.
- '''
- if not isinstance(sequence, Sequence):
- raise AnsibleFilterError('Input is not a sequence')
-
- result = dict()
- for list_index, element in enumerate(sequence):
- if not isinstance(element, Mapping):
- raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index))
- if attribute not in element:
- raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index))
- result_index = element[attribute]
- if result_index in result:
- raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index))
- result[result_index] = element
- return result
-
-
-class FilterModule(object):
- ''' Ansible list filters '''
-
- def filters(self):
- return {
- 'groupby_as_dict': groupby_as_dict,
- }
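
Stripped of the error handling above, groupby_as_dict is a keyed re-indexing; a minimal sketch (duplicate keys would raise in the real filter, not handled here):

    # Re-index a list of mappings by one attribute.
    users = [{'name': 'alice', 'uid': 1}, {'name': 'bob', 'uid': 2}]
    by_name = {u['name']: u for u in users}
    print(by_name['bob'])  # {'name': 'bob', 'uid': 2}
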
diff --git a/ansible_collections/community/general/plugins/filter/jc.py b/ansible_collections/community/general/plugins/filter/jc.py
deleted file mode 100644
index f8fc4ac5..00000000
--- a/ansible_collections/community/general/plugins/filter/jc.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Filipe Niero Felisbino
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-#
-# contributed by Kelly Brazil
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError, AnsibleFilterError
-import importlib
-
-try:
- import jc
- HAS_LIB = True
-except ImportError:
- HAS_LIB = False
-
-
-def jc(data, parser, quiet=True, raw=False):
- """Convert returned command output to JSON using the JC library
-
- Arguments:
-
- parser required (string) the correct parser for the input data (e.g. 'ifconfig')
- see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers.
- quiet optional (bool) True to suppress warning messages (default is True)
- raw optional (bool) True to return pre-processed JSON (default is False)
-
- Returns:
-
- dictionary or list of dictionaries
-
- Example:
-
- - name: run date command
- hosts: ubuntu
- tasks:
- - shell: date
- register: result
- - set_fact:
- myvar: "{{ result.stdout | community.general.jc('date') }}"
- - debug:
- msg: "{{ myvar }}"
-
- produces:
-
- ok: [192.168.1.239] => {
- "msg": {
- "day": 9,
- "hour": 22,
- "minute": 6,
- "month": "Aug",
- "month_num": 8,
- "second": 22,
- "timezone": "UTC",
- "weekday": "Sun",
- "weekday_num": 1,
- "year": 2020
- }
- }
- """
-
- if not HAS_LIB:
- raise AnsibleError('You need to install "jc" prior to running jc filter')
-
- try:
- jc_parser = importlib.import_module('jc.parsers.' + parser)
- return jc_parser.parse(data, quiet=quiet, raw=raw)
-
- except Exception as e:
- raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
-
-
-class FilterModule(object):
- ''' Query filter '''
-
- def filters(self):
- return {
- 'jc': jc
- }
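
Internally the filter imports jc.parsers.<parser> and calls its parse() function; a standalone sketch of that step (assumes the jc package is installed; the parser choice and input line are illustrative):

    import importlib

    parser = importlib.import_module('jc.parsers.uptime')
    result = parser.parse(' 16:52:21 up 3 days,  4:49,  1 user,  load average: 1.00, 0.53, 0.49', quiet=True)
    print(result)
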
diff --git a/ansible_collections/community/general/plugins/filter/json_query.py b/ansible_collections/community/general/plugins/filter/json_query.py
deleted file mode 100644
index 9c835e8c..00000000
--- a/ansible_collections/community/general/plugins/filter/json_query.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Filipe Niero Felisbino
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleError, AnsibleFilterError
-
-try:
- import jmespath
- HAS_LIB = True
-except ImportError:
- HAS_LIB = False
-
-
-def json_query(data, expr):
- '''Query data using jmespath query language ( http://jmespath.org ). Example:
-    - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
- '''
- if not HAS_LIB:
- raise AnsibleError('You need to install "jmespath" prior to running '
- 'json_query filter')
-
- # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
- # See issue: https://github.com/ansible-collections/community.general/issues/320
- jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
- jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
- jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
- try:
- return jmespath.search(expr, data)
- except jmespath.exceptions.JMESPathError as e:
- raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
- except Exception as e:
- # For older jmespath, we can get ValueError and TypeError without much info.
- raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
-
-
-class FilterModule(object):
- ''' Query filter '''
-
- def filters(self):
- return {
- 'json_query': json_query
- }
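
Without the Ansible type shims above, the filter reduces to a direct jmespath.search call (assumes the jmespath package is installed):

    import jmespath

    data = {'instances': [{'id': 'i-1', 'state': 'running'},
                          {'id': 'i-2', 'state': 'stopped'}]}
    print(jmespath.search("instances[?state=='running'].id", data))  # ['i-1']
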
diff --git a/ansible_collections/community/general/plugins/filter/list.py b/ansible_collections/community/general/plugins/filter/list.py
deleted file mode 100644
index 005e4b7c..00000000
--- a/ansible_collections/community/general/plugins/filter/list.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020-2022, Vladimir Botka
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.errors import AnsibleFilterError
-from ansible.module_utils.six import string_types
-from ansible.module_utils.common._collections_compat import Mapping, Sequence
-from ansible.utils.vars import merge_hash
-from ansible.release import __version__ as ansible_version
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-from collections import defaultdict
-from operator import itemgetter
-
-
-def merge_hash_wrapper(x, y, recursive=False, list_merge='replace'):
-    ''' Wrapper of the function merge_hash from ansible.utils.vars. Only 2 parameters are allowed
- for Ansible 2.9 and lower.'''
-
- if LooseVersion(ansible_version) < LooseVersion('2.10'):
- if list_merge != 'replace' or recursive:
- msg = ("Non default options of list_merge(default=replace) or recursive(default=False) "
- "are not allowed in Ansible version 2.9 or lower. Ansible version is %s, "
- "recursive=%s, and list_merge=%s.")
- raise AnsibleFilterError(msg % (ansible_version, recursive, list_merge))
- else:
- return merge_hash(x, y)
- else:
- return merge_hash(x, y, recursive, list_merge)
-
-
-def list_mergeby(x, y, index, recursive=False, list_merge='replace'):
- ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used.
- This function is used by the function lists_mergeby.
- '''
-
- d = defaultdict(dict)
- for l in (x, y):
- for elem in l:
- if not isinstance(elem, Mapping):
- msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s"
- raise AnsibleFilterError(msg % (elem, type(elem)))
- if index in elem.keys():
- d[elem[index]].update(merge_hash_wrapper(d[elem[index]], elem, recursive, list_merge))
- return sorted(d.values(), key=itemgetter(index))
-
-
-def lists_mergeby(*terms, **kwargs):
- ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge'
- control the merging of the lists in values. The function merge_hash from ansible.utils.vars
- is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
- Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
- hashes/dictionaries".
-
- Example:
- - debug:
- msg: "{{ list1|
- community.general.lists_mergeby(list2,
- 'index',
- recursive=True,
- list_merge='append')|
- list }}"
- '''
-
- recursive = kwargs.pop('recursive', False)
- list_merge = kwargs.pop('list_merge', 'replace')
- if kwargs:
- raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.")
- if len(terms) < 2:
- raise AnsibleFilterError("At least one list and index are needed.")
-
- # allow the user to do `[list1, list2, ...] | lists_mergeby('index')`
- flat_list = []
- for sublist in terms[:-1]:
- if not isinstance(sublist, Sequence):
- msg = ("All arguments before the argument index for community.general.lists_mergeby "
- "must be lists. %s is %s")
- raise AnsibleFilterError(msg % (sublist, type(sublist)))
- if len(sublist) > 0:
- if all(isinstance(l, Sequence) for l in sublist):
- for item in sublist:
- flat_list.append(item)
- else:
- flat_list.append(sublist)
- lists = flat_list
-
- if not lists:
- return []
-
- if len(lists) == 1:
- return lists[0]
-
- index = terms[-1]
-
- if not isinstance(index, string_types):
- msg = ("First argument after the lists for community.general.lists_mergeby must be string. "
- "%s is %s")
- raise AnsibleFilterError(msg % (index, type(index)))
-
- high_to_low_prio_list_iterator = reversed(lists)
- result = next(high_to_low_prio_list_iterator)
- for list in high_to_low_prio_list_iterator:
- result = list_mergeby(list, result, index, recursive, list_merge)
-
- return result
-
-
-class FilterModule(object):
- ''' Ansible list filters '''
-
- def filters(self):
- return {
- 'lists_mergeby': lists_mergeby,
- }
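
The core merge-by-attribute idea in list_mergeby above, reduced to a standalone sketch (non-recursive, later lists win, matching the default list_merge='replace'):

    from collections import defaultdict

    list1 = [{'name': 'a', 'port': 22}, {'name': 'b', 'port': 80}]
    list2 = [{'name': 'b', 'port': 443}]

    merged = defaultdict(dict)
    for lst in (list1, list2):
        for item in lst:
            merged[item['name']].update(item)
    print(sorted(merged.values(), key=lambda d: d['name']))
    # [{'name': 'a', 'port': 22}, {'name': 'b', 'port': 443}]
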
diff --git a/ansible_collections/community/general/plugins/filter/path_join_shim.py b/ansible_collections/community/general/plugins/filter/path_join_shim.py
deleted file mode 100644
index 9734298a..00000000
--- a/ansible_collections/community/general/plugins/filter/path_join_shim.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020-2021, Felix Fontein
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import os.path
-
-
-def path_join(list):
- '''Join list of paths.
-
- This is a minimal shim for ansible.builtin.path_join included in ansible-base 2.10.
- This should only be called by Ansible 2.9 or earlier. See meta/runtime.yml for details.
- '''
- return os.path.join(*list)
-
-
-class FilterModule(object):
- '''Ansible jinja2 filters'''
-
- def filters(self):
- return {
- 'path_join': path_join,
- }
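
The shim simply unpacks its list argument into os.path.join:

    import os.path

    print(os.path.join(*['/etc', 'ansible', 'hosts']))  # /etc/ansible/hosts
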
diff --git a/ansible_collections/community/general/plugins/filter/random_mac.py b/ansible_collections/community/general/plugins/filter/random_mac.py
deleted file mode 100644
index 7d25555a..00000000
--- a/ansible_collections/community/general/plugins/filter/random_mac.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020 Ansible Project
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-from random import Random, SystemRandom
-
-from ansible.errors import AnsibleFilterError
-from ansible.module_utils.six import string_types
-
-
-def random_mac(value, seed=None):
-    ''' Takes a string prefix and returns it completed with random bytes
-        to form a complete 6-byte MAC address '''
-
- if not isinstance(value, string_types):
- raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
- (type(value), value))
-
- value = value.lower()
- mac_items = value.split(':')
-
- if len(mac_items) > 5:
- raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
- ' items max' % value)
-
- err = ""
- for mac in mac_items:
- if not mac:
- err += ",empty item"
- continue
- if not re.match('[a-f0-9]{2}', mac):
-            err += ",%s not a hex byte" % mac
- err = err.strip(',')
-
- if err:
- raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
-
- if seed is None:
- r = SystemRandom()
- else:
- r = Random(seed)
-    # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF
- v = r.randint(68719476736, 1099511627775)
- # Select first n chars to complement input prefix
- remain = 2 * (6 - len(mac_items))
- rnd = ('%x' % v)[:remain]
- return value + re.sub(r'(..)', r':\1', rnd)
-
-
-class FilterModule:
- ''' Ansible jinja2 filters '''
- def filters(self):
- return {
- 'random_mac': random_mac,
- }
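
A standalone sketch of the completion step random_mac performs (the prefix is illustrative; the real filter also validates the prefix and supports seeding):

    import random

    prefix = '52:54:00'
    missing = 6 - len(prefix.split(':'))
    suffix = ':'.join('%02x' % random.randint(0, 255) for _ in range(missing))
    print(prefix + ':' + suffix)  # e.g. 52:54:00:3f:aa:01
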
diff --git a/ansible_collections/community/general/plugins/filter/unicode_normalize.py b/ansible_collections/community/general/plugins/filter/unicode_normalize.py
deleted file mode 100644
index 9afbf29e..00000000
--- a/ansible_collections/community/general/plugins/filter/unicode_normalize.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andrew Pantuso (@ajpantuso)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from unicodedata import normalize
-
-from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
-from ansible.module_utils.six import text_type
-
-
-def unicode_normalize(data, form='NFC'):
- """Applies normalization to 'unicode' strings.
-
- Args:
- data: A unicode string piped into the Jinja filter
- form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
- See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.
-
- Returns:
- A normalized unicode string of the specified 'form'.
- """
-
- if not isinstance(data, text_type):
- raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))
-
- if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
- raise AnsibleFilterError("%s is not a valid form" % form)
-
- return normalize(form, data)
-
-
-class FilterModule(object):
- def filters(self):
- return {
- 'unicode_normalize': unicode_normalize,
- }
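
Why normalization matters: composed and decomposed representations of the same character only compare equal after normalizing, which is all this filter does:

    from unicodedata import normalize

    composed, decomposed = '\u00e9', 'e\u0301'       # both render as 'é'
    print(composed == decomposed)                    # False
    print(normalize('NFC', decomposed) == composed)  # True
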
diff --git a/ansible_collections/community/general/plugins/filter/version_sort.py b/ansible_collections/community/general/plugins/filter/version_sort.py
deleted file mode 100644
index ac62ef8c..00000000
--- a/ansible_collections/community/general/plugins/filter/version_sort.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2021 Eric Lavarde
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def version_sort(value, reverse=False):
- '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10'''
- return sorted(value, key=LooseVersion, reverse=reverse)
-
-
-class FilterModule(object):
- ''' Version sort filter '''
-
- def filters(self):
- return {
- 'version_sort': version_sort
- }
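
A rough standalone illustration of loose version ordering for purely numeric version strings (LooseVersion itself also copes with non-numeric parts):

    versions = ['2.10', '2.9', '2.1.1']
    print(sorted(versions, key=lambda v: [int(p) for p in v.split('.')]))
    # ['2.1.1', '2.9', '2.10']
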
diff --git a/ansible_collections/community/general/plugins/inventory/cobbler.py b/ansible_collections/community/general/plugins/inventory/cobbler.py
deleted file mode 100644
index d50acd0c..00000000
--- a/ansible_collections/community/general/plugins/inventory/cobbler.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2020 Orion Poplawski
-# Copyright (c) 2020 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Orion Poplawski (@opoplawski)
- name: cobbler
- short_description: Cobbler inventory source
- version_added: 1.0.0
- description:
- - Get inventory hosts from the cobbler service.
-        - "Uses a configuration file as an inventory source; it must end in C(.cobbler.yml) or C(.cobbler.yaml) and contain a C(plugin: cobbler) entry."
- extends_documentation_fragment:
- - inventory_cache
- options:
- plugin:
-            description: The name of this plugin; it should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
- required: yes
- choices: [ 'cobbler', 'community.general.cobbler' ]
- url:
- description: URL to cobbler.
- default: 'http://cobbler/cobbler_api'
- env:
- - name: COBBLER_SERVER
- user:
- description: Cobbler authentication user.
- required: no
- env:
- - name: COBBLER_USER
- password:
- description: Cobbler authentication password
- required: no
- env:
- - name: COBBLER_PASSWORD
- cache_fallback:
- description: Fallback to cached results if connection to cobbler fails
- type: boolean
- default: no
- exclude_profiles:
- description:
- - Profiles to exclude from inventory.
- - Ignored if I(include_profiles) is specified.
- type: list
- default: []
- elements: str
- include_profiles:
- description:
- - Profiles to include from inventory.
- - If specified, all other profiles will be excluded.
- - I(exclude_profiles) is ignored if I(include_profiles) is specified.
- type: list
- default: []
- elements: str
- version_added: 4.4.0
- group_by:
- description: Keys to group hosts by
- type: list
- elements: string
- default: [ 'mgmt_classes', 'owners', 'status' ]
- group:
- description: Group to place all hosts into
- default: cobbler
- group_prefix:
- description: Prefix to apply to cobbler groups
- default: cobbler_
- want_facts:
-        description: Toggle; if C(true), the plugin will retrieve host facts from the server
- type: boolean
- default: yes
-'''
-
-EXAMPLES = '''
-# my.cobbler.yml
-plugin: community.general.cobbler
-url: http://cobbler/cobbler_api
-user: ansible-tester
-password: secure
-'''
-
-import socket
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six import iteritems
-from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
-
-# xmlrpc
-try:
- import xmlrpclib as xmlrpc_client
- HAS_XMLRPC_CLIENT = True
-except ImportError:
- try:
- import xmlrpc.client as xmlrpc_client
- HAS_XMLRPC_CLIENT = True
- except ImportError:
- HAS_XMLRPC_CLIENT = False
-
-
-class InventoryModule(BaseInventoryPlugin, Cacheable):
- ''' Host inventory parser for ansible using cobbler as source. '''
-
- NAME = 'community.general.cobbler'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
- self.cache_key = None
- self.connection = None
-
- def verify_file(self, path):
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('cobbler.yaml', 'cobbler.yml')):
- valid = True
- else:
- self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"')
- return valid
-
- def _get_connection(self):
- if not HAS_XMLRPC_CLIENT:
- raise AnsibleError('Could not import xmlrpc client library')
-
- if self.connection is None:
- self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
- self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
- self.token = None
- if self.get_option('user') is not None:
- self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
- return self.connection
-
- def _init_cache(self):
- if self.cache_key not in self._cache:
- self._cache[self.cache_key] = {}
-
- def _reload_cache(self):
- if self.get_option('cache_fallback'):
- self.display.vvv('Cannot connect to server, loading cache\n')
- self._options['cache_timeout'] = 0
- self.load_cache_plugin()
- self._cache.get(self.cache_key, {})
-
- def _get_profiles(self):
- if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
- try:
- if self.token is not None:
- data = c.get_profiles(self.token)
- else:
- data = c.get_profiles()
- except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
- self._reload_cache()
- else:
- self._init_cache()
- self._cache[self.cache_key]['profiles'] = data
-
- return self._cache[self.cache_key]['profiles']
-
- def _get_systems(self):
- if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
- c = self._get_connection()
- try:
- if self.token is not None:
- data = c.get_systems(self.token)
- else:
- data = c.get_systems()
- except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
- self._reload_cache()
- else:
- self._init_cache()
- self._cache[self.cache_key]['systems'] = data
-
- return self._cache[self.cache_key]['systems']
-
- def _add_safe_group_name(self, group, child=None):
- group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
- if child is not None:
- self.inventory.add_child(group_name, child)
- return group_name
-
- def _exclude_profile(self, profile):
- if self.include_profiles:
- return profile not in self.include_profiles
- else:
- return profile in self.exclude_profiles
-
- def parse(self, inventory, loader, path, cache=True):
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- # read config from file, this sets 'options'
- self._read_config_data(path)
-
- # get connection host
- self.cobbler_url = self.get_option('url')
- self.cache_key = self.get_cache_key(path)
- self.use_cache = cache and self.get_option('cache')
-
- self.exclude_profiles = self.get_option('exclude_profiles')
- self.include_profiles = self.get_option('include_profiles')
- self.group_by = self.get_option('group_by')
-
- for profile in self._get_profiles():
- if profile['parent']:
- self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
- if not self._exclude_profile(profile['parent']):
- parent_group_name = self._add_safe_group_name(profile['parent'])
- self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
- if not self._exclude_profile(profile['name']):
- group_name = self._add_safe_group_name(profile['name'])
- self.display.vvvv('Added profile group %s\n' % group_name)
- self.inventory.add_child(parent_group_name, group_name)
- else:
- self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
-                # Create a hierarchy of profile names
- profile_elements = profile['name'].split('-')
- i = 0
- while i < len(profile_elements) - 1:
- profile_group = '-'.join(profile_elements[0:i + 1])
- profile_group_child = '-'.join(profile_elements[0:i + 2])
- if self._exclude_profile(profile_group):
- self.display.vvvv('Excluding profile %s\n' % profile_group)
- break
- group_name = self._add_safe_group_name(profile_group)
- self.display.vvvv('Added profile group %s\n' % group_name)
- child_group_name = self._add_safe_group_name(profile_group_child)
- self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
- self.inventory.add_child(group_name, child_group_name)
- i = i + 1
-
- # Add default group for this inventory if specified
- self.group = to_safe_group_name(self.get_option('group'))
- if self.group is not None and self.group != '':
- self.inventory.add_group(self.group)
- self.display.vvvv('Added site group %s\n' % self.group)
-
- for host in self._get_systems():
- # Get the FQDN for the host and add it to the right groups
- hostname = host['hostname'] # None
- interfaces = host['interfaces']
-
- if self._exclude_profile(host['profile']):
- self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
- continue
-
- # hostname is often empty for non-static IP hosts
- if hostname == '':
- for (iname, ivalue) in iteritems(interfaces):
- if ivalue['management'] or not ivalue['static']:
- this_dns_name = ivalue.get('dns_name', None)
- if this_dns_name is not None and this_dns_name != "":
- hostname = this_dns_name
- self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
-
- if hostname == '':
- self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
- continue
-
- self.inventory.add_host(hostname)
- self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
-
- # Add host to profile group
- group_name = self._add_safe_group_name(host['profile'], child=hostname)
- self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
-
- # Add host to groups specified by group_by fields
- for group_by in self.group_by:
- if host[group_by] == '<>':
- groups = []
- else:
- groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
- for group in groups:
- group_name = self._add_safe_group_name(group, child=hostname)
- self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
-
- # Add to group for this inventory
- if self.group is not None:
- self.inventory.add_child(self.group, hostname)
-
- # Add host variables
- if self.get_option('want_facts'):
- try:
- self.inventory.set_variable(hostname, 'cobbler', host)
- except ValueError as e:
- self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
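
The parse() method above builds nested groups by splitting profile names on '-'; a standalone sketch of that step (the profile name is illustrative):

    profile = 'web-prod-eu'
    parts = profile.split('-')
    for i in range(len(parts) - 1):
        parent = '-'.join(parts[:i + 1])
        child = '-'.join(parts[:i + 2])
        print('%s -> %s' % (parent, child))
    # web -> web-prod
    # web-prod -> web-prod-eu
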
diff --git a/ansible_collections/community/general/plugins/inventory/linode.py b/ansible_collections/community/general/plugins/inventory/linode.py
deleted file mode 100644
index 33ecc513..00000000
--- a/ansible_collections/community/general/plugins/inventory/linode.py
+++ /dev/null
@@ -1,324 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: linode
- author:
- - Luke Murphy (@decentral1se)
- short_description: Ansible dynamic inventory plugin for Linode.
- requirements:
- - python >= 2.7
- - linode_api4 >= 2.0.0
- description:
- - Reads inventories from the Linode API v4.
- - Uses a YAML configuration file that ends with linode.(yml|yaml).
- - Linode labels are used by default as the hostnames.
- - The default inventory groups are built from groups (deprecated by
- Linode) and not tags.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- cache:
- version_added: 4.5.0
- cache_plugin:
- version_added: 4.5.0
- cache_timeout:
- version_added: 4.5.0
- cache_connection:
- version_added: 4.5.0
- cache_prefix:
- version_added: 4.5.0
- plugin:
- description: Marks this as an instance of the 'linode' plugin.
- required: true
- choices: ['linode', 'community.general.linode']
- ip_style:
- description: Populate hostvars with all information available from the Linode APIv4.
- type: string
- default: plain
- choices:
- - plain
- - api
- version_added: 3.6.0
- access_token:
- description: The Linode account personal access token.
- required: true
- env:
- - name: LINODE_ACCESS_TOKEN
- regions:
- description: Populate inventory with instances in this region.
- default: []
- type: list
- elements: string
- tags:
- description: Populate inventory only with instances which have at least one of the tags listed here.
- default: []
- type: list
- elements: string
- version_added: 2.0.0
- types:
- description: Populate inventory with instances with this type.
- default: []
- type: list
- elements: string
- strict:
- version_added: 2.0.0
- compose:
- version_added: 2.0.0
- groups:
- version_added: 2.0.0
- keyed_groups:
- version_added: 2.0.0
-'''
-
-EXAMPLES = r'''
-# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
-plugin: community.general.linode
-
-# You can use Jinja to template the access token.
-plugin: community.general.linode
-access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
-# For older Ansible versions, you need to write this as:
-# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
-
-# Example with regions, types, groups and access token
-plugin: community.general.linode
-access_token: foobar
-regions:
- - eu-west
-types:
- - g5-standard-2
-
-# Example with keyed_groups, groups, and compose
-plugin: community.general.linode
-access_token: foobar
-keyed_groups:
- - key: tags
- separator: ''
- - key: region
- prefix: region
-groups:
- webservers: "'web' in (tags|list)"
- mailservers: "'mail' in (tags|list)"
-compose:
- # By default, Ansible tries to connect to the label of the instance.
- # Since that might not be a valid name to connect to, you can
- # replace it with the first IPv4 address of the linode as follows:
- ansible_ssh_host: ipv4[0]
- ansible_port: 2222
-
-# Example where control traffic is limited to the internal network
-plugin: community.general.linode
-access_token: foobar
-ip_style: api
-compose:
- ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
-'''
-
-import os
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.module_utils.six import string_types
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.template import Templar
-
-
-try:
- from linode_api4 import LinodeClient
- from linode_api4.objects.linode import Instance
- from linode_api4.errors import ApiError as LinodeApiError
- HAS_LINODE = True
-except ImportError:
- HAS_LINODE = False
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'community.general.linode'
-
- def _build_client(self, loader):
- """Build the Linode client."""
-
- t = Templar(loader=loader)
-
- access_token = self.get_option('access_token')
- if t.is_template(access_token):
- access_token = t.template(variable=access_token, disable_lookups=False)
-
- if access_token is None:
- try:
- access_token = os.environ['LINODE_ACCESS_TOKEN']
- except KeyError:
- pass
-
- if access_token is None:
- raise AnsibleError((
- 'Could not retrieve Linode access token '
- 'from plugin configuration or environment'
- ))
-
- self.client = LinodeClient(access_token)
-
- def _get_instances_inventory(self):
- """Retrieve Linode instance information from cloud inventory."""
- try:
- self.instances = self.client.linode.instances()
- except LinodeApiError as exception:
- raise AnsibleError('Linode client raised: %s' % exception)
-
- def _add_groups(self):
- """Add Linode instance groups to the dynamic inventory."""
- self.linode_groups = set(
- filter(None, [
- instance.group
- for instance
- in self.instances
- ])
- )
-
- for linode_group in self.linode_groups:
- self.inventory.add_group(linode_group)
-
- def _filter_by_config(self):
- """Filter instances by user specified configuration."""
- regions = self.get_option('regions')
- if regions:
- self.instances = [
- instance for instance in self.instances
- if instance.region.id in regions
- ]
-
- types = self.get_option('types')
- if types:
- self.instances = [
- instance for instance in self.instances
- if instance.type.id in types
- ]
-
- tags = self.get_option('tags')
- if tags:
- self.instances = [
- instance for instance in self.instances
- if any(tag in instance.tags for tag in tags)
- ]
-
- def _add_instances_to_groups(self):
- """Add instance names to their dynamic inventory groups."""
- for instance in self.instances:
- self.inventory.add_host(instance.label, group=instance.group)
-
- def _add_hostvars_for_instances(self):
- """Add hostvars for instances in the dynamic inventory."""
- ip_style = self.get_option('ip_style')
- for instance in self.instances:
- hostvars = instance._raw_json
- for hostvar_key in hostvars:
- if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
- continue
- self.inventory.set_variable(
- instance.label,
- hostvar_key,
- hostvars[hostvar_key]
- )
- if ip_style == 'api':
- ips = instance.ips.ipv4.public + instance.ips.ipv4.private
- ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
- ips += instance.ips.ipv6.pools
-
- for ip_type in set(ip.type for ip in ips):
- self.inventory.set_variable(
- instance.label,
- ip_type,
- self._ip_data([ip for ip in ips if ip.type == ip_type])
- )
-
- def _ip_data(self, ip_list):
- data = []
- for ip in list(ip_list):
- data.append(
- {
- 'address': ip.address,
- 'subnet_mask': ip.subnet_mask,
- 'gateway': ip.gateway,
- 'public': ip.public,
- 'prefix': ip.prefix,
- 'rdns': ip.rdns,
- 'type': ip.type
- }
- )
- return data
-
- def _cacheable_inventory(self):
- return [i._raw_json for i in self.instances]
-
- def populate(self):
- strict = self.get_option('strict')
-
- self._filter_by_config()
-
- self._add_groups()
- self._add_instances_to_groups()
- self._add_hostvars_for_instances()
- for instance in self.instances:
- variables = self.inventory.get_host(instance.label).get_vars()
- self._add_host_to_composed_groups(
- self.get_option('groups'),
- variables,
- instance.label,
- strict=strict)
- self._add_host_to_keyed_groups(
- self.get_option('keyed_groups'),
- variables,
- instance.label,
- strict=strict)
- self._set_composite_vars(
- self.get_option('compose'),
- variables,
- instance.label,
- strict=strict)
-
- def verify_file(self, path):
- """Verify the Linode configuration file."""
- if super(InventoryModule, self).verify_file(path):
- endings = ('linode.yaml', 'linode.yml')
- if any((path.endswith(ending) for ending in endings)):
- return True
- return False
-
- def parse(self, inventory, loader, path, cache=True):
-        """Dynamically parse the Linode cloud inventory."""
- super(InventoryModule, self).parse(inventory, loader, path)
- self.instances = None
-
- if not HAS_LINODE:
- raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
-
- self._read_config_data(path)
-
- cache_key = self.get_cache_key(path)
-
- if cache:
- cache = self.get_option('cache')
-
- update_cache = False
- if cache:
- try:
- self.instances = [Instance(None, i["id"], i) for i in self._cache[cache_key]]
- except KeyError:
- update_cache = True
-
- # Check for None rather than False in order to allow
- # for empty sets of cached instances
- if self.instances is None:
- self._build_client(loader)
- self._get_instances_inventory()
-
- if update_cache:
- self._cache[cache_key] = self._cacheable_inventory()
-
- self.populate()
diff --git a/ansible_collections/community/general/plugins/inventory/lxd.py b/ansible_collections/community/general/plugins/inventory/lxd.py
deleted file mode 100644
index 91263850..00000000
--- a/ansible_collections/community/general/plugins/inventory/lxd.py
+++ /dev/null
@@ -1,1051 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2021, Frank Dornheim
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: lxd
- short_description: Returns Ansible inventory from lxd host
- description:
-    - Get inventory hosts from LXD.
- - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
- version_added: "3.0.0"
- author: "Frank Dornheim (@conloos)"
- requirements:
- - ipaddress
- - lxd >= 4.0
- options:
- plugin:
- description: Token that ensures this is a source file for the 'lxd' plugin.
- required: true
- choices: [ 'community.general.lxd' ]
- url:
- description:
- - The unix domain socket path or the https URL for the lxd server.
-        - Sockets in the filesystem have to start with C(unix:).
- - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- aliases: [ key_file ]
- default: $HOME/.config/lxc/client.key
- type: path
- client_cert:
- description:
- - The client certificate file path.
- aliases: [ cert_file ]
- default: $HOME/.config/lxc/client.crt
- type: path
- trust_password:
- description:
- - The client trusted password.
-        - You need to set this password on the lxd server before
-          running this module, using the following command:
-          C(lxc config set core.trust_password <some random password>).
-          See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
-        - If I(trust_password) is set, this module sends a request for authentication before sending any requests.
- type: str
- state:
- description: Filter the instance according to the current status.
- type: str
- default: none
- choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
- type_filter:
- description:
- - Filter the instances by type C(virtual-machine), C(container) or C(both).
- - The first version of the inventory only supported containers.
- type: str
- default: container
- choices: [ 'virtual-machine', 'container', 'both' ]
- version_added: 4.2.0
- prefered_instance_network_interface:
- description:
-        - If an instance has multiple network interfaces, select the preferred one by this name pattern.
-        - The pattern is combined with the first number that can be found, e.g. 'eth' + 0.
- - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
- The old name still works as an alias.
- type: str
- default: eth
- aliases:
- - prefered_container_network_interface
- prefered_instance_network_family:
- description:
-        - If an instance has multiple network interfaces, select the preferred one by address family.
- - Specify C(inet) for IPv4 and C(inet6) for IPv6.
- type: str
- default: inet
- choices: [ 'inet', 'inet6' ]
- groupby:
- description:
- - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
- - See example for syntax.
- type: dict
-'''
-
-EXAMPLES = '''
-# simple lxd.yml
-plugin: community.general.lxd
-url: unix:/var/snap/lxd/common/lxd/unix.socket
-
-# simple lxd.yml including filter
-plugin: community.general.lxd
-url: unix:/var/snap/lxd/common/lxd/unix.socket
-state: RUNNING
-
-# simple lxd.yml including virtual machines and containers
-plugin: community.general.lxd
-url: unix:/var/snap/lxd/common/lxd/unix.socket
-type_filter: both
-
-# grouping lxd.yml
-groupby:
- locationBerlin:
- type: location
- attribute: Berlin
- netRangeIPv4:
- type: network_range
- attribute: 10.98.143.0/24
- netRangeIPv6:
- type: network_range
- attribute: fd42:bd00:7b11:2167:216:3eff::/24
- osUbuntu:
- type: os
- attribute: ubuntu
- testpattern:
- type: pattern
- attribute: test
- profileDefault:
- type: profile
- attribute: default
- profileX11:
- type: profile
- attribute: x11
- releaseFocal:
- type: release
- attribute: focal
- releaseBionic:
- type: release
- attribute: bionic
- typeVM:
- type: type
- attribute: virtual-machine
- typeContainer:
- type: type
- attribute: container
- vlan666:
- type: vlanid
- attribute: 666
-'''
-
-import binascii
-import json
-import re
-import time
-import os
-import socket
-from ansible.plugins.inventory import BaseInventoryPlugin
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.common.dict_transformations import dict_merge
-from ansible.module_utils.six import raise_from
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
-
-try:
- import ipaddress
-except ImportError as exc:
- IPADDRESS_IMPORT_ERROR = exc
-else:
- IPADDRESS_IMPORT_ERROR = None
-
-
-class InventoryModule(BaseInventoryPlugin):
- DEBUG = 4
- NAME = 'community.general.lxd'
- SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket'
- SOCKET_URL = 'unix:/var/lib/lxd/unix.socket'
-
- @staticmethod
- def load_json_data(path):
- """Load json data
-
- Load json data from file
-
- Args:
-            str(path): Path to the JSON data file
-                that should be loaded
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(json_data): json data"""
- try:
- with open(path, 'r') as json_file:
- return json.load(json_file)
- except (IOError, json.decoder.JSONDecodeError) as err:
- raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
-
- def save_json_data(self, path, file_name=None):
- """save data as json
-
- Save data as json file
-
- Args:
- list(path): Path elements
- str(file_name): Filename of data
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
- if file_name:
- path.append(file_name)
- else:
- prefix = 'lxd_data-'
- time_stamp = time.strftime('%Y%m%d-%H%M%S')
- suffix = '.atd'
- path.append(prefix + time_stamp + suffix)
-
- try:
- cwd = os.path.abspath(os.path.dirname(__file__))
- with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
- json.dump(self.data, json_file)
- except IOError as err:
- raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
-
- def verify_file(self, path):
- """Check the config
-
- Return true/false if the config-file is valid for this plugin
-
- Args:
- str(path): path to the config
- Kwargs:
- None
- Raises:
- None
- Returns:
- bool(valid): is valid"""
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('lxd.yaml', 'lxd.yml')):
- valid = True
- else:
- self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"')
- return valid
-
- @staticmethod
- def validate_url(url):
- """validate url
-
- check whether the url is correctly formatted
-
- Args:
- url
- Kwargs:
- None
- Raises:
- AnsibleError
- Returns:
- bool"""
- if not isinstance(url, str):
- return False
- if not url.startswith(('unix:', 'https:')):
- raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
- return True
-
- def _connect_to_socket(self):
- """connect to lxd socket
-
- Connect to lxd socket by provided url or defaults
-
- Args:
- None
- Kwargs:
- None
- Raises:
- AnsibleError
- Returns:
- None"""
- error_storage = {}
- url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL]
- urls = (url for url in url_list if self.validate_url(url))
- for url in urls:
- try:
- socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
- return socket_connection
- except LXDClientException as err:
- error_storage[url] = err
- raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
-
- def _get_networks(self):
- """Get Networknames
-
- Returns all network config names
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- list(names): names of all network_configs"""
- # e.g. {'type': 'sync',
- # 'status': 'Success',
- # 'status_code': 200,
- # 'operation': '',
- # 'error_code': 0,
- # 'error': '',
- # 'metadata': ['/1.0/networks/lxdbr0']}
- network_configs = self.socket.do('GET', '/1.0/networks')
- return [m.split('/')[3] for m in network_configs['metadata']]
-
- def _get_instances(self):
- """Get instancenames
-
- Returns all instancenames
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- list(names): names of all instances"""
- # e.g. {
- # "metadata": [
- # "/1.0/instances/foo",
- # "/1.0/instances/bar"
- # ],
- # "status": "Success",
- # "status_code": 200,
- # "type": "sync"
- # }
- instances = self.socket.do('GET', '/1.0/instances')
- return [m.split('/')[3] for m in instances['metadata']]
-
- def _get_config(self, branch, name):
- """Get inventory of instance
-
- Get config of instance
-
- Args:
-            str(branch): Name of the API branch
- str(name): Name of instance
- Kwargs:
- None
- Source:
- https://github.com/lxc/lxd/blob/master/doc/rest-api.md
- Raises:
- None
- Returns:
- dict(config): Config of the instance"""
- config = {}
- if isinstance(branch, (tuple, list)):
- config[name] = {branch[1]: self.socket.do('GET', '/1.0/{0}/{1}/{2}'.format(to_native(branch[0]), to_native(name), to_native(branch[1])))}
- else:
- config[name] = {branch: self.socket.do('GET', '/1.0/{0}/{1}'.format(to_native(branch), to_native(name)))}
- return config
-
- def get_instance_data(self, names):
- """Create Inventory of the instance
-
-        Iterate through the different branches of the instances and collect information.
-
- Args:
- list(names): List of instance names
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-        # tuple(('instances', 'metadata/templates')) to get a section in the branch
-        # e.g. /1.0/instances/<name>/metadata/templates
- branches = ['instances', ('instances', 'state')]
- instance_config = {}
- for branch in branches:
- for name in names:
- instance_config['instances'] = self._get_config(branch, name)
- self.data = dict_merge(instance_config, self.data)
-
-    def get_network_data(self, names):
-        """Create inventory of the networks
-
-        Iterate through the different branches of the networks and collect information.
-
-        Args:
-            list(names): List of network names
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-        # tuple(('networks', 'state')) to get a section in the branch
-        # e.g. /1.0/networks/<name>/state
- branches = [('networks', 'state')]
- network_config = {}
- for branch in branches:
- for name in names:
- try:
- network_config['networks'] = self._get_config(branch, name)
- except LXDClientException:
- network_config['networks'] = {name: None}
- self.data = dict_merge(network_config, self.data)
-
- def extract_network_information_from_instance_config(self, instance_name):
- """Returns the network interface configuration
-
-        Returns the IPv4 and IPv6 network config of the instance, excluding link-local addresses
-
- Args:
-            str(instance_name): Name of the instance
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(network_configuration): network config"""
- instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
- network_configuration = None
- if instance_network_interfaces:
- network_configuration = {}
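-            # skip the loopback interface; link-local addresses are filtered out below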
- gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
- for interface_name in gen_interface_names:
- gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
- network_configuration[interface_name] = []
- for address in gen_address:
- address_set = {}
- address_set['family'] = address.get('family')
- address_set['address'] = address.get('address')
- address_set['netmask'] = address.get('netmask')
- address_set['combined'] = address.get('address') + '/' + address.get('netmask')
- network_configuration[interface_name].append(address_set)
- return network_configuration
-
-    def get_prefered_instance_network_interface(self, instance_name):
-        """Helper to get the preferred interface of the instance
-
-        Helper to get the preferred interface, selected by the name pattern from 'prefered_instance_network_interface'.
-
- Args:
-            str(instance_name): name of the instance
- Kwargs:
- None
- Raises:
- None
- Returns:
- str(prefered_interface): None or interface name"""
- instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
- prefered_interface = None # init
-        if instance_network_interfaces:  # instance has network interfaces
-            # interfaces which start with the desired pattern
-            net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
- selected_interfaces = [] # init
- for interface in net_generator:
- selected_interfaces.append(interface)
- if len(selected_interfaces) > 0:
- prefered_interface = sorted(selected_interfaces)[0]
- return prefered_interface
-
- def get_instance_vlans(self, instance_name):
- """Get VLAN(s) from instance
-
- Helper to get the VLAN_ID from the instance
-
- Args:
-            str(instance_name): name of the instance
- Kwargs:
- None
- Raises:
- None
- Returns:
-            dict(network: vlan_id) or None"""
- # get network device configuration and store {network: vlan_id}
- network_vlans = {}
- for network in self._get_data_entry('networks'):
- if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
- network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
-
-        # get network devices of the instance and return
- # e.g.
- # "eth0":{ "name":"eth0",
- # "network":"lxdbr0",
- # "type":"nic"},
- vlan_ids = {}
- devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
- for device in devices:
- if 'network' in devices[device]:
- if devices[device]['network'] in network_vlans:
- vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')]
- return vlan_ids if vlan_ids else None
-
- def _get_data_entry(self, path, data=None, delimiter='/'):
- """Helper to get data
-
- Helper to get data from self.data by a path like 'path/to/target'
- Attention: Escaping of the delimiter is not (yet) provided.
-
- Args:
- str(path): path to nested dict
- Kwargs:
- dict(data): datastore
- str(delimiter): delimiter in Path.
- Raises:
- None
- Returns:
- *(value)"""
- try:
- if not data:
- data = self.data
- if delimiter in path:
- path = path.split(delimiter)
-
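-            # descend one level per call: consume the first path segment and recurse with the remainder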
- if isinstance(path, list) and len(path) > 1:
- data = data[path.pop(0)]
- path = delimiter.join(path)
- return self._get_data_entry(path, data, delimiter) # recursion
- return data[path]
- except KeyError:
- return None
-
- def _set_data_entry(self, instance_name, key, value, path=None):
- """Helper to save data
-
- Helper to save the data in self.data
-        Detect if data is already in the branch and use dict_merge() to prevent the branch from being overwritten.
-
- Args:
- str(instance_name): name of instance
- str(key): same as dict
- *(value): same as dict
- Kwargs:
- str(path): path to branch-part
- Raises:
- AnsibleParserError
- Returns:
- None"""
- if not path:
- path = self.data['inventory']
- if instance_name not in path:
- path[instance_name] = {}
-
- try:
- if isinstance(value, dict) and key in path[instance_name]:
- path[instance_name] = dict_merge(value, path[instance_name][key])
- else:
- path[instance_name][key] = value
- except KeyError as err:
-            raise AnsibleParserError("Unable to store information: {0}".format(to_native(err)))
-
- def extract_information_from_instance_configs(self):
- """Process configuration information
-
- Preparation of the data
-
- Args:
-            None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # create branch "inventory"
- if 'inventory' not in self.data:
- self.data['inventory'] = {}
-
- for instance_name in self.data['instances']:
- self._set_data_entry(instance_name, 'os', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
- self._set_data_entry(instance_name, 'release', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
- self._set_data_entry(instance_name, 'version', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
- self._set_data_entry(instance_name, 'profile', self._get_data_entry(
- 'instances/{0}/instances/metadata/profiles'.format(instance_name)))
- self._set_data_entry(instance_name, 'location', self._get_data_entry(
- 'instances/{0}/instances/metadata/location'.format(instance_name)))
- self._set_data_entry(instance_name, 'state', self._get_data_entry(
- 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
- self._set_data_entry(instance_name, 'type', self._get_data_entry(
- 'instances/{0}/instances/metadata/type'.format(instance_name)))
- self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
- self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
- self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
-
- def build_inventory_network(self, instance_name):
- """Add the network interfaces of the instance to the inventory
-
- Logic:
-        - if the instance has no interface -> 'ansible_connection: local'
- - get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: '
- - first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: '
-
- Args:
- str(instance_name): name of instance
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
- def interface_selection(instance_name):
- """Select instance Interface for inventory
-
- Logic:
- - get preferred_interface & prefered_instance_network_family -> str(IP)
- - first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
-
- Args:
- str(instance_name): name of instance
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(interface_name: ip)"""
- prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
- prefered_instance_network_family = self.prefered_instance_network_family
-
- ip_address = ''
- if prefered_interface:
- interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
- for config in interface:
- if config['family'] == prefered_instance_network_family:
- ip_address = config['address']
- break
- else:
- interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
- for interface in interfaces.values():
- for config in interface:
- if config['family'] == prefered_instance_network_family:
- ip_address = config['address']
- break
- return ip_address
-
-        if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)):  # instance has network interfaces
- self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
- self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
- else:
- self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
-
- def build_inventory_hosts(self):
- """Build host-part dynamic inventory
-
- Build the host-part of the dynamic inventory.
-        Add hosts and host_vars to the inventory.
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- for instance_name in self.data['inventory']:
- instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
-
-            # Only consider instances that match the "state" filter, if self.filter is not None
- if self.filter:
- if self.filter.lower() != instance_state:
- continue
- # add instance
- self.inventory.add_host(instance_name)
-            # add network information
- self.build_inventory_network(instance_name)
- # add os
- v = self._get_data_entry('inventory/{0}/os'.format(instance_name))
- if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower())
- # add release
- v = self._get_data_entry('inventory/{0}/release'.format(instance_name))
- if v:
- self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower())
- # add profile
- self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
- # add state
- self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
- # add type
- self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
- # add location information
-            if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none":  # lxd returns the string 'none', not Python None
- self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
- # add VLAN_ID information
- if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
- self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
-
- def build_inventory_groups_location(self, group_name):
- """create group by attribute: location
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- for instance_name in self.inventory.hosts:
- if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups_pattern(self, group_name):
- """create group by name pattern
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- regex_pattern = self.groupby[group_name].get('attribute')
-
- for instance_name in self.inventory.hosts:
- result = re.search(regex_pattern, instance_name)
- if result:
- self.inventory.add_child(group_name, instance_name)
-
-    def build_inventory_groups_network_range(self, group_name):
-        """create group by network range: check whether an instance IP is inside the given network
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- try:
- network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
- except ValueError as err:
- raise AnsibleParserError(
- 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
-
- for instance_name in self.inventory.hosts:
- if self.data['inventory'][instance_name].get('network_interfaces') is not None:
- for interface in self.data['inventory'][instance_name].get('network_interfaces'):
- for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
- try:
- address = ipaddress.ip_address(to_text(interface_family['address']))
- if address.version == network.version and address in network:
- self.inventory.add_child(group_name, instance_name)
- except ValueError:
- # Ignore invalid IP addresses returned by lxd
- pass
-
- def build_inventory_groups_os(self, group_name):
- """create group by attribute: os
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- gen_instances = [
- instance_name for instance_name in self.inventory.hosts
- if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
- for instance_name in gen_instances:
- if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups_release(self, group_name):
- """create group by attribute: release
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- gen_instances = [
- instance_name for instance_name in self.inventory.hosts
- if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
- for instance_name in gen_instances:
- if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups_profile(self, group_name):
- """create group by attribute: profile
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- gen_instances = [
- instance_name for instance_name in self.inventory.hosts.keys()
- if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
- for instance_name in gen_instances:
- if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups_vlanid(self, group_name):
- """create group by attribute: vlanid
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- gen_instances = [
- instance_name for instance_name in self.inventory.hosts.keys()
- if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
- for instance_name in gen_instances:
- if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups_type(self, group_name):
- """create group by attribute: type
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- # maybe we just want to expand one group
- if group_name not in self.inventory.groups:
- self.inventory.add_group(group_name)
-
- gen_instances = [
- instance_name for instance_name in self.inventory.hosts
- if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
- for instance_name in gen_instances:
- if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
- self.inventory.add_child(group_name, instance_name)
-
- def build_inventory_groups(self):
- """Build group-part dynamic inventory
-
- Build the group-part of the dynamic inventory.
- Add groups to the inventory.
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
-        def group_type(group_name):
-            """create groups defined by lxd.yml or default values
-
-            create groups defined by lxd.yml or default values
-            supported:
- * 'location'
- * 'pattern'
- * 'network_range'
- * 'os'
- * 'release'
- * 'profile'
- * 'vlanid'
- * 'type'
-
- Args:
- str(group_name): Group name
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
-            # for compatibility with Python 2, a dispatch map is not used
- if self.groupby[group_name].get('type') == 'location':
- self.build_inventory_groups_location(group_name)
- elif self.groupby[group_name].get('type') == 'pattern':
- self.build_inventory_groups_pattern(group_name)
- elif self.groupby[group_name].get('type') == 'network_range':
- self.build_inventory_groups_network_range(group_name)
- elif self.groupby[group_name].get('type') == 'os':
- self.build_inventory_groups_os(group_name)
- elif self.groupby[group_name].get('type') == 'release':
- self.build_inventory_groups_release(group_name)
- elif self.groupby[group_name].get('type') == 'profile':
- self.build_inventory_groups_profile(group_name)
- elif self.groupby[group_name].get('type') == 'vlanid':
- self.build_inventory_groups_vlanid(group_name)
- elif self.groupby[group_name].get('type') == 'type':
- self.build_inventory_groups_type(group_name)
- else:
- raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
-
- if self.groupby:
- for group_name in self.groupby:
- if not group_name.isalnum():
- raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
- group_type(group_name)
-
- def build_inventory(self):
- """Build dynamic inventory
-
- Build the dynamic inventory.
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
- self.build_inventory_hosts()
- self.build_inventory_groups()
-
- def cleandata(self):
- """Clean the dynamic inventory
-
-        The first version of the inventory only supported containers.
-        This will change in the future.
-        The following function cleans up the data and removes all items with the wrong type.
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- iter_keys = list(self.data['instances'].keys())
- for instance_name in iter_keys:
- if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
- del self.data['instances'][instance_name]
-
- def _populate(self):
- """Return the hosts and groups
-
- Returns the processed instance configurations from the lxd import
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
-
-        if len(self.data) == 0:  # if no data was injected by unit tests, open the socket
- self.socket = self._connect_to_socket()
- self.get_instance_data(self._get_instances())
- self.get_network_data(self._get_networks())
-
- # The first version of the inventory only supported containers.
- # This will change in the future.
- # The following function cleans up the data.
- if self.type_filter != 'both':
- self.cleandata()
-
- self.extract_information_from_instance_configs()
-
- # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
-
- self.build_inventory()
-
- def parse(self, inventory, loader, path, cache):
- """Return dynamic inventory from source
-
- Returns the processed inventory from the lxd import
-
- Args:
- str(inventory): inventory object with existing data and
- the methods to add hosts/groups/variables
- to inventory
- str(loader): Ansible's DataLoader
- str(path): path to the config
- bool(cache): use or avoid caches
- Kwargs:
- None
- Raises:
- AnsibleParserError
- Returns:
- None"""
- if IPADDRESS_IMPORT_ERROR:
- raise_from(
-                AnsibleError('ipaddress must be installed to use this plugin'),
- IPADDRESS_IMPORT_ERROR)
-
- super(InventoryModule, self).parse(inventory, loader, path, cache=False)
- # Read the inventory YAML file
- self._read_config_data(path)
- try:
- self.client_key = self.get_option('client_key')
- self.client_cert = self.get_option('client_cert')
- self.debug = self.DEBUG
- self.data = {} # store for inventory-data
- self.groupby = self.get_option('groupby')
- self.plugin = self.get_option('plugin')
- self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
- self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
- self.type_filter = self.get_option('type_filter')
-            if self.get_option('state').lower() == 'none':  # 'none' in the config file is a string, not None
- self.filter = None
- else:
- self.filter = self.get_option('state').lower()
- self.trust_password = self.get_option('trust_password')
- self.url = self.get_option('url')
- except Exception as err:
- raise AnsibleParserError(
-                'Not all required options could be read: {0}'.format(to_native(err)))
- # Call our internal helper to populate the dynamic inventory
- self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/nmap.py b/ansible_collections/community/general/plugins/inventory/nmap.py
deleted file mode 100644
index 44d68750..00000000
--- a/ansible_collections/community/general/plugins/inventory/nmap.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: nmap
- short_description: Uses nmap to find hosts to target
- description:
- - Uses a YAML configuration file with a valid YAML extension.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- requirements:
- - nmap CLI installed
- options:
- plugin:
- description: token that ensures this is a source file for the 'nmap' plugin.
- required: True
- choices: ['nmap', 'community.general.nmap']
- address:
-      description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
- required: True
- exclude:
- description: list of addresses to exclude
- type: list
- elements: string
- ports:
- description: Enable/disable scanning for open ports
- type: boolean
- default: True
- ipv4:
- description: use IPv4 type addresses
- type: boolean
- default: True
- ipv6:
- description: use IPv6 type addresses
- type: boolean
- default: True
- notes:
-        - At least one of ipv4 or ipv6 must be True; both can be True, but they cannot both be False.
- - 'TODO: add OS fingerprinting'
-'''
-EXAMPLES = '''
-# inventory.config file in YAML format
-plugin: community.general.nmap
-strict: False
-address: 192.168.0.0/24
-'''
-
-import os
-import re
-
-from subprocess import Popen, PIPE
-
-from ansible import constants as C
-from ansible.errors import AnsibleParserError
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.module_utils.common.process import get_bin_path
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
-
- NAME = 'community.general.nmap'
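-    # find_host matches lines like "Nmap scan report for <name> (<ip>)" (the IP group is optional);
-    # find_port matches "<port>/<proto> <state> <service>" rows of the port table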
- find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
- find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
-
- def __init__(self):
- self._nmap = None
- super(InventoryModule, self).__init__()
-
- def _populate(self, hosts):
- # Use constructed if applicable
- strict = self.get_option('strict')
-
- for host in hosts:
- hostname = host['name']
- self.inventory.add_host(hostname)
- for var, value in host.items():
- self.inventory.set_variable(hostname, var, value)
-
- # Composed variables
- self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
-
- # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
- self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
-
- # Create groups based on variable values and add the corresponding hosts to it
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
-
- def verify_file(self, path):
-
- valid = False
- if super(InventoryModule, self).verify_file(path):
- file_name, ext = os.path.splitext(path)
-
- if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
- valid = True
-
- return valid
-
- def parse(self, inventory, loader, path, cache=True):
-
- try:
- self._nmap = get_bin_path('nmap')
- except ValueError as e:
- raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
-
- super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
-
- self._read_config_data(path)
-
- cache_key = self.get_cache_key(path)
-
- # cache may be True or False at this point to indicate if the inventory is being refreshed
- # get the user's cache option too to see if we should save the cache if it is changing
- user_cache_setting = self.get_option('cache')
-
- # read if the user has caching enabled and the cache isn't being refreshed
- attempt_to_read_cache = user_cache_setting and cache
- # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
- cache_needs_update = user_cache_setting and not cache
-
- if attempt_to_read_cache:
- try:
- results = self._cache[cache_key]
- except KeyError:
- # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
- cache_needs_update = True
-
- if not user_cache_setting or cache_needs_update:
- # setup command
- cmd = [self._nmap]
- if not self._options['ports']:
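-                # -sP: ping scan (host discovery) only, skip port scanning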
- cmd.append('-sP')
-
- if self._options['ipv4'] and not self._options['ipv6']:
- cmd.append('-4')
- elif self._options['ipv6'] and not self._options['ipv4']:
- cmd.append('-6')
- elif not self._options['ipv6'] and not self._options['ipv4']:
- raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
-
- if self._options['exclude']:
- cmd.append('--exclude')
- cmd.append(','.join(self._options['exclude']))
-
- cmd.append(self._options['address'])
- try:
- # execute
- p = Popen(cmd, stdout=PIPE, stderr=PIPE)
- stdout, stderr = p.communicate()
- if p.returncode != 0:
- raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
-
- # parse results
- host = None
- ip = None
- ports = []
- results = []
-
- try:
- t_stdout = to_text(stdout, errors='surrogate_or_strict')
- except UnicodeError as e:
- raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
-
- for line in t_stdout.splitlines():
- hits = self.find_host.match(line)
- if hits:
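-                        # a new host header starts; attach the ports collected for the previous host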
- if host is not None and ports:
- results[-1]['ports'] = ports
-
-                        # if DNS only shows the arpa record, use the IP as the hostname instead
- if hits.group(1).endswith('.in-addr.arpa'):
- host = hits.group(2)
- else:
- host = hits.group(1)
-
-                        # if reverse DNS exists, the IP is in group(2); otherwise group(1) is already the IP
- if hits.group(2) is not None:
- ip = hits.group(2)
- else:
- ip = hits.group(1)
-
- if host is not None:
- # update inventory
- results.append(dict())
- results[-1]['name'] = host
- results[-1]['ip'] = ip
- ports = []
- continue
-
- host_ports = self.find_port.match(line)
- if host is not None and host_ports:
- ports.append({'port': host_ports.group(1),
- 'protocol': host_ports.group(2),
- 'state': host_ports.group(3),
- 'service': host_ports.group(4)})
- continue
-
- # if any leftovers
- if host and ports:
- results[-1]['ports'] = ports
-
- except Exception as e:
- raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
-
- if cache_needs_update:
- self._cache[cache_key] = results
-
- self._populate(results)
diff --git a/ansible_collections/community/general/plugins/inventory/online.py b/ansible_collections/community/general/plugins/inventory/online.py
deleted file mode 100644
index 00454f55..00000000
--- a/ansible_collections/community/general/plugins/inventory/online.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: online
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway (previously Online SAS or Online.net) inventory source
- description:
- - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
- options:
- plugin:
- description: token that ensures this is a source file for the 'online' plugin.
- required: True
- choices: ['online', 'community.general.online']
- oauth_token:
- required: True
- description: Online OAuth token.
- env:
- # in order of precedence
- - name: ONLINE_TOKEN
- - name: ONLINE_API_KEY
- - name: ONLINE_OAUTH_TOKEN
- hostnames:
-      description: List of preferences about what to use as a hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - hostname
- groups:
- description: List of groups.
- type: list
- elements: string
- choices:
- - location
- - offer
- - rpn
-'''
-
-EXAMPLES = r'''
-# online_inventory.yml file in YAML format
-# Example command line: ansible-inventory --list -i online_inventory.yml
-
-plugin: community.general.online
-hostnames:
- - public_ipv4
-groups:
- - location
- - offer
- - rpn
-'''
-
-import json
-from sys import version as python_version
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.urls import open_url
-from ansible.plugins.inventory import BaseInventoryPlugin
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.ansible_release import __version__ as ansible_version
-from ansible.module_utils.six.moves.urllib.parse import urljoin
-
-
-class InventoryModule(BaseInventoryPlugin):
- NAME = 'community.general.online'
- API_ENDPOINT = "https://api.online.net"
-
- def extract_public_ipv4(self, host_infos):
- try:
- return host_infos["network"]["ip"][0]
- except (KeyError, TypeError, IndexError):
- self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
- return None
-
- def extract_private_ipv4(self, host_infos):
- try:
- return host_infos["network"]["private"][0]
- except (KeyError, TypeError, IndexError):
- self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
- return None
-
- def extract_os_name(self, host_infos):
- try:
- return host_infos["os"]["name"]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting OS name. Information skipped.")
- return None
-
- def extract_os_version(self, host_infos):
- try:
- return host_infos["os"]["version"]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting OS version. Information skipped.")
- return None
-
- def extract_hostname(self, host_infos):
- try:
- return host_infos["hostname"]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting hostname. Information skipped.")
- return None
-
- def extract_location(self, host_infos):
- try:
- return host_infos["location"]["datacenter"]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting datacenter location. Information skipped.")
- return None
-
- def extract_offer(self, host_infos):
- try:
- return host_infos["offer"]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting commercial offer. Information skipped.")
- return None
-
- def extract_rpn(self, host_infos):
- try:
- return self.rpn_lookup_cache[host_infos["id"]]
- except (KeyError, TypeError):
- self.display.warning("An error happened while extracting RPN information. Information skipped.")
- return None
-
- def _fetch_information(self, url):
- try:
- response = open_url(url, headers=self.headers)
- except Exception as e:
- self.display.warning("An error happened while fetching: %s" % url)
- return None
-
- try:
- raw_data = to_text(response.read(), errors='surrogate_or_strict')
- except UnicodeError:
- raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
-
- try:
- return json.loads(raw_data)
- except ValueError:
- raise AnsibleError("Incorrect JSON payload")
-
- @staticmethod
- def extract_rpn_lookup_cache(rpn_list):
- lookup = {}
- for rpn in rpn_list:
- for member in rpn["members"]:
- lookup[member["id"]] = rpn["name"]
- return lookup
-
- def _fill_host_variables(self, hostname, host_infos):
- targeted_attributes = (
- "offer",
- "id",
- "hostname",
- "location",
- "boot_mode",
- "power",
- "last_reboot",
- "anti_ddos",
- "hardware_watch",
- "support"
- )
- for attribute in targeted_attributes:
- self.inventory.set_variable(hostname, attribute, host_infos[attribute])
-
- if self.extract_public_ipv4(host_infos=host_infos):
- self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
- self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
-
-        if self.extract_private_ipv4(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "private_ipv4", self.extract_private_ipv4(host_infos=host_infos))
-
- if self.extract_os_name(host_infos=host_infos):
- self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
-
- if self.extract_os_version(host_infos=host_infos):
-            self.inventory.set_variable(hostname, "os_version", self.extract_os_version(host_infos=host_infos))
-
- def _filter_host(self, host_infos, hostname_preferences):
-
- for pref in hostname_preferences:
- if self.extractors[pref](host_infos):
- return self.extractors[pref](host_infos)
-
- return None
-
- def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
-
- hostname = self._filter_host(host_infos=host_infos,
- hostname_preferences=hostname_preferences)
-
-        # No suitable hostname was found in the attributes, so the host won't be in the inventory
- if not hostname:
- return
-
- self.inventory.add_host(host=hostname)
- self._fill_host_variables(hostname=hostname, host_infos=host_infos)
-
- for g in group_preferences:
- group = self.group_extractors[g](host_infos)
-
- if not group:
- return
-
- self.inventory.add_group(group=group)
- self.inventory.add_host(group=group, host=hostname)
-
- def parse(self, inventory, loader, path, cache=True):
- super(InventoryModule, self).parse(inventory, loader, path)
- self._read_config_data(path=path)
-
- token = self.get_option("oauth_token")
- hostname_preferences = self.get_option("hostnames")
-
- group_preferences = self.get_option("groups")
- if group_preferences is None:
- group_preferences = []
-
- self.extractors = {
- "public_ipv4": self.extract_public_ipv4,
- "private_ipv4": self.extract_private_ipv4,
- "hostname": self.extract_hostname,
- }
-
- self.group_extractors = {
- "location": self.extract_location,
- "offer": self.extract_offer,
- "rpn": self.extract_rpn
- }
-
- self.headers = {
- 'Authorization': "Bearer %s" % token,
- 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
- 'Content-type': 'application/json'
- }
-
- servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
- servers_api_path = self._fetch_information(url=servers_url)
-
- if "rpn" in group_preferences:
- rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
- rpn_list = self._fetch_information(url=rpn_groups_url)
- self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
-
- for server_api_path in servers_api_path:
-
- server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
- raw_server_info = self._fetch_information(url=server_url)
-
- if raw_server_info is None:
- continue
-
- self.do_server_inventory(host_infos=raw_server_info,
- hostname_preferences=hostname_preferences,
- group_preferences=group_preferences)
diff --git a/ansible_collections/community/general/plugins/inventory/opennebula.py b/ansible_collections/community/general/plugins/inventory/opennebula.py
deleted file mode 100644
index d967e13f..00000000
--- a/ansible_collections/community/general/plugins/inventory/opennebula.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: opennebula
- author:
- - Kristian Feldsam (@feldsam)
- short_description: OpenNebula inventory source
- version_added: "3.8.0"
- extends_documentation_fragment:
- - constructed
- description:
- - Get inventory hosts from OpenNebula cloud.
-        - Uses a YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
- to set parameter values.
-        - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to an OpenNebula credentials file.
- options:
- plugin:
- description: Token that ensures this is a source file for the 'opennebula' plugin.
- type: string
- required: true
- choices: [ community.general.opennebula ]
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- env:
- - name: ONE_URL
- required: True
- type: string
- api_username:
- description:
-                - Name of the user to log in to the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
- env:
- - name: ONE_USERNAME
- type: string
- api_password:
- description:
-                - Password or a token of the user to log in to the OpenNebula RPC server.
- - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
- env:
- - name: ONE_PASSWORD
- required: False
- type: string
- api_authfile:
- description:
-                - If neither I(api_username) nor I(api_password) is set, then it will try to
-                  authenticate with the ONE auth file. The default path is C(~/.one/one_auth).
- - Set environment variable C(ONE_AUTH) to override this path.
- env:
- - name: ONE_AUTH
- required: False
- type: string
- hostname:
-            description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on the VM.
- type: string
- default: v4_first_ip
- choices:
- - v4_first_ip
- - v6_first_ip
- - name
- filter_by_label:
- description: Only return servers filtered by this label.
- type: string
- group_by_labels:
-            description: Create host groups by VM labels
- type: bool
- default: True
-'''
-
-EXAMPLES = r'''
-# inventory_opennebula.yml file in YAML format
-# Example command line: ansible-inventory --list -i inventory_opennebula.yml
-
-# Pass a label filter to the API
-plugin: community.general.opennebula
-api_url: https://opennebula:2633/RPC2
-filter_by_label: Cache
-'''
-
-try:
- import pyone
-
- HAS_PYONE = True
-except ImportError:
- HAS_PYONE = False
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible.module_utils.common.text.converters import to_native
-
-from collections import namedtuple
-import os
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable):
- NAME = 'community.general.opennebula'
-
- def verify_file(self, path):
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('opennebula.yaml', 'opennebula.yml')):
- valid = True
- return valid
-
- def _get_connection_info(self):
- url = self.get_option('api_url')
- username = self.get_option('api_username')
- password = self.get_option('api_password')
- authfile = self.get_option('api_authfile')
-
- if not username and not password:
- if authfile is None:
- authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
- try:
- with open(authfile, "r") as fp:
- authstring = fp.read().rstrip()
- username, password = authstring.split(":")
- except (OSError, IOError):
- raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
- except Exception:
-                raise AnsibleError("Error occurred while reading ONE_AUTH file at '{e}'".format(e=authfile))
-
- auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
- return auth_params(url=url, username=username, password=password)
-
- def _get_vm_ipv4(self, vm):
- nic = vm.TEMPLATE.get('NIC')
-
- if isinstance(nic, dict):
- nic = [nic]
-
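-        # return the IPv4 address of the first NIC; False below signals that no address was found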
- for net in nic:
- return net['IP']
-
- return False
-
- def _get_vm_ipv6(self, vm):
- nic = vm.TEMPLATE.get('NIC')
-
- if isinstance(nic, dict):
- nic = [nic]
-
- for net in nic:
- if net.get('IP6_GLOBAL'):
- return net['IP6_GLOBAL']
-
- return False
-
- def _get_vm_pool(self):
- auth = self._get_connection_info()
-
- if not (auth.username and auth.password):
- raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
- else:
- one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
-
- # get hosts (VMs)
- try:
- vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
- except Exception as e:
- raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
-
- return vm_pool
-
- def _retrieve_servers(self, label_filter=None):
- vm_pool = self._get_vm_pool()
-
- result = []
-
- # iterate over hosts
- for vm in vm_pool.VM:
- server = vm.USER_TEMPLATE
-
- labels = []
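-            # LABELS is a free-form string; keep only letters, digits, spaces, commas and hyphens,
-            # then normalize spaces/hyphens to underscores and split on commas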
- if vm.USER_TEMPLATE.get('LABELS'):
- labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
- labels = ''.join(labels)
- labels = labels.replace(' ', '_')
- labels = labels.replace('-', '_')
- labels = labels.split(',')
-
- # filter by label
- if label_filter is not None:
- if label_filter not in labels:
- continue
-
- server['name'] = vm.NAME
- server['LABELS'] = labels
- server['v4_first_ip'] = self._get_vm_ipv4(vm)
- server['v6_first_ip'] = self._get_vm_ipv6(vm)
-
- result.append(server)
-
- return result
-
- def _populate(self):
- hostname_preference = self.get_option('hostname')
- group_by_labels = self.get_option('group_by_labels')
-
-        # Add all hosts to the top-level group 'all'
- self.inventory.add_group(group='all')
-
- filter_by_label = self.get_option('filter_by_label')
- for server in self._retrieve_servers(filter_by_label):
- # check for labels
- if group_by_labels and server['LABELS']:
- for label in server['LABELS']:
- self.inventory.add_group(group=label)
- self.inventory.add_host(host=server['name'], group=label)
-
- self.inventory.add_host(host=server['name'], group='all')
-
- for attribute, value in server.items():
- self.inventory.set_variable(server['name'], attribute, value)
-
- if hostname_preference != 'name':
- self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
-
- if server.get('SSH_PORT'):
- self.inventory.set_variable(server['name'], 'ansible_port', server['SSH_PORT'])
-
- def parse(self, inventory, loader, path, cache=True):
- if not HAS_PYONE:
- raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
-
- super(InventoryModule, self).parse(inventory, loader, path)
- self._read_config_data(path=path)
-
- self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/proxmox.py b/ansible_collections/community/general/plugins/inventory/proxmox.py
deleted file mode 100644
index fc562974..00000000
--- a/ansible_collections/community/general/plugins/inventory/proxmox.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2016 Guido Günther, Daniel Lobato Garcia
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: proxmox
- short_description: Proxmox inventory source
- version_added: "1.2.0"
- author:
- - Jeffrey van Pelt (@Thulium-Drake)
- requirements:
- - requests >= 1.1
- description:
- - Get inventory hosts from a Proxmox PVE cluster.
-    - "Uses a configuration file as an inventory source; it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
- - Will retrieve the first network interface with an IP for Proxmox nodes.
- - Can retrieve LXC/QEMU configuration as facts.
- extends_documentation_fragment:
- - constructed
- - inventory_cache
- options:
- plugin:
-      description: The name of this plugin. It should always be set to C(community.general.proxmox) for this plugin to recognize the file as its own.
- required: yes
- choices: ['community.general.proxmox']
- type: str
- url:
- description:
- - URL to Proxmox cluster.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
- default: 'http://localhost:8006'
- type: str
- env:
- - name: PROXMOX_URL
- version_added: 2.0.0
- user:
- description:
- - Proxmox authentication user.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
- required: yes
- type: str
- env:
- - name: PROXMOX_USER
- version_added: 2.0.0
- password:
- description:
- - Proxmox authentication password.
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
- required: yes
- type: str
- env:
- - name: PROXMOX_PASSWORD
- version_added: 2.0.0
- validate_certs:
- description: Verify SSL certificate if using HTTPS.
- type: boolean
- default: yes
- group_prefix:
- description: Prefix to apply to Proxmox groups.
- default: proxmox_
- type: str
- facts_prefix:
- description: Prefix to apply to LXC/QEMU config facts.
- default: proxmox_
- type: str
- want_facts:
- description: Gather LXC/QEMU configuration facts.
- default: no
- type: bool
- want_proxmox_nodes_ansible_host:
- version_added: 3.0.0
- description:
-        - Whether to set C(ansible_host) for proxmox nodes.
- - When set to C(true) (default), will use the first available interface. This can be different from what you expect.
- default: true
- type: bool
- filters:
- version_added: 4.6.0
- description: A list of Jinja templates that allow filtering hosts.
- type: list
- elements: str
- default: []
- strict:
- version_added: 2.5.0
- compose:
- version_added: 2.5.0
- groups:
- version_added: 2.5.0
- keyed_groups:
- version_added: 2.5.0
-'''
-
-EXAMPLES = '''
-# Minimal example which will not gather additional facts for QEMU/LXC guests
-# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
-# my.proxmox.yml
-plugin: community.general.proxmox
-user: ansible@pve
-password: secure
-
-# More complete example demonstrating the use of 'want_facts' and the constructed options
-# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-validate_certs: false
-want_facts: true
-keyed_groups:
- # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
- - key: proxmox_tags_parsed
- separator: ""
- prefix: group
-groups:
- webservers: "'web' in (proxmox_tags_parsed|list)"
- mailservers: "'mail' in (proxmox_tags_parsed|list)"
-compose:
- ansible_port: 2222
-
-# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
-# (Default is connection by name of QEMU/LXC guests)
-# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
-# my.proxmox.yml
-plugin: community.general.proxmox
-url: http://pve.domain.com:8006
-user: ansible@pve
-password: secure
-validate_certs: false
-want_facts: true
-compose:
- ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
- my_inv_var_1: "'my_var1_value'"
- my_inv_var_2: >
- "my_var_2_value"
-'''
-
-import itertools
-import re
-
-from ansible.module_utils.common._collections_compat import MutableMapping
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.utils.display import Display
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-# 3rd party imports
-try:
- import requests
- if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
- raise ImportError
- HAS_REQUESTS = True
-except ImportError:
- HAS_REQUESTS = False
-
-display = Display()
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
- ''' Host inventory parser for ansible using Proxmox as source. '''
-
- NAME = 'community.general.proxmox'
-
- def __init__(self):
-
- super(InventoryModule, self).__init__()
-
- # from config
- self.proxmox_url = None
-
- self.session = None
- self.cache_key = None
- self.use_cache = None
-
- def verify_file(self, path):
-
- valid = False
- if super(InventoryModule, self).verify_file(path):
- if path.endswith(('proxmox.yaml', 'proxmox.yml')):
- valid = True
- else:
-                self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" or "proxmox.yml"')
- return valid
-
- def _get_session(self):
- if not self.session:
- self.session = requests.session()
- self.session.verify = self.get_option('validate_certs')
- return self.session
-
- def _get_auth(self):
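-        # exchange username/password for an auth ticket and CSRF token via the /access/ticket endpoint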
- credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
-
- a = self._get_session()
- ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
-
- json = ret.json()
-
- self.credentials = {
- 'ticket': json['data']['ticket'],
- 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
- }
-
- def _get_json(self, url, ignore_errors=None):
-
- if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
-
- if self.cache_key not in self._cache:
- self._cache[self.cache_key] = {'url': ''}
-
- data = []
- s = self._get_session()
- while True:
- headers = {'Cookie': 'PVEAuthCookie={0}'.format(self.credentials['ticket'])}
- ret = s.get(url, headers=headers)
- if ignore_errors and ret.status_code in ignore_errors:
- break
- ret.raise_for_status()
- json = ret.json()
-
- # process results
- # FIXME: This assumes 'return type' matches a specific query,
-                #        it will break if we expand the queries and they don't have different types
- if 'data' not in json:
- # /hosts/:id does not have a 'data' key
- data = json
- break
- elif isinstance(json['data'], MutableMapping):
- # /facts are returned as dict in 'data'
- data = json['data']
- break
- else:
-                    # the 'data' key of /hosts is a list of all hosts; results may be paginated
- data = data + json['data']
- break
-
- self._cache[self.cache_key][url] = data
-
- return self._cache[self.cache_key][url]
-
- def _get_nodes(self):
- return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
-
- def _get_pools(self):
- return self._get_json("%s/api2/json/pools" % self.proxmox_url)
-
- def _get_lxc_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
-
- def _get_qemu_per_node(self, node):
- return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
-
- def _get_members_per_pool(self, pool):
- ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
- return ret['members']
-
- def _get_node_ip(self, node):
- ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
-
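-        # return the address of the first interface; on any lookup error fall back to None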
- for iface in ret:
- try:
- return iface['address']
- except Exception:
- return None
-
- def _get_agent_network_interfaces(self, node, vmid, vmtype):
- result = []
-
- try:
- ifaces = self._get_json(
- "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
- self.proxmox_url, node, vmtype, vmid
- )
- )['result']
-
- if "error" in ifaces:
- if "class" in ifaces["error"]:
-                    # This happens on Windows: even though the qemu agent is running, the IP address
-                    # cannot be fetched because it is unsupported; the command may also be disabled.
- errorClass = ifaces["error"]["class"]
- if errorClass in ["Unsupported"]:
- self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
- elif errorClass in ["CommandDisabled"]:
- self.display.v("Retrieving network interfaces from guest agents has been disabled")
- return result
-
- for iface in ifaces:
- result.append({
- 'name': iface['name'],
- 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
- 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
- })
- except requests.HTTPError:
- pass
-
- return result
-
- def _get_vm_config(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
-
- properties[self._fact('node')] = node
- properties[self._fact('vmid')] = vmid
- properties[self._fact('vmtype')] = vmtype
-
- plaintext_configs = [
- 'description',
- ]
-
- for config in ret:
- key = self._fact(config)
- value = ret[config]
- try:
- # fixup disk images as they have no key
- if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
- value = ('disk_image=' + value)
-
- # Additional field containing parsed tags as list
- if config == 'tags':
- parsed_key = self.to_safe('%s%s' % (key, "_parsed"))
- properties[parsed_key] = [tag.strip() for tag in value.split(",")]
-
- # The first field in the agent string tells you whether the agent is enabled
- # the rest of the comma separated string is extra config for the agent
- if config == 'agent' and int(value.split(',')[0]):
- agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
- if agent_iface_value:
- agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
- properties[agent_iface_key] = agent_iface_value
-
- if config not in plaintext_configs and not isinstance(value, int) and all("=" in v for v in value.split(",")):
- # split off strings with commas to a dict
- # skip over any keys that cannot be processed
- try:
- value = dict(key.split("=", 1) for key in value.split(","))
- except Exception:
- continue
-
- properties[key] = value
- except NameError:
- return None
-
- def _get_vm_status(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
- properties[self._fact('status')] = ret['status']
-
- def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
- ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
- snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
- properties[self._fact('snapshots')] = snapshots
-
- def to_safe(self, word):
- '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
- #> ProxmoxInventory.to_safe("foo-bar baz")
- 'foo_barbaz'
- '''
- regex = r"[^A-Za-z0-9\_]"
- return re.sub(regex, "_", word.replace(" ", ""))
-
- def _fact(self, name):
- '''Generate a fact's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
-
- def _group(self, name):
- '''Generate a group's full name from the common prefix and a name.'''
- return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
-
- def _can_add_host(self, name, properties):
-        '''Ensure that a host satisfies all defined host filters. If strict mode is
-        enabled, any error during host filter composition will lead to an AnsibleError
-        being raised; otherwise the filter will be ignored.
- '''
- for host_filter in self.host_filters:
- try:
- if not self._compose(host_filter, properties):
- return False
- except Exception as e: # pylint: disable=broad-except
- message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
- if self.strict:
- raise AnsibleError(message)
- display.warning(message)
- return True
-
- def _add_host(self, name, variables):
- self.inventory.add_host(name)
- for k, v in variables.items():
- self.inventory.set_variable(name, k, v)
- variables = self.inventory.get_host(name).get_vars()
- self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
- self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
- self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
-
- def _handle_item(self, node, ittype, item):
-        '''Handle an item from the list of LXC containers and Qemu VMs. The
- return value will be either None if the item was skipped or the name of
- the item if it was added to the inventory.'''
- if item.get('template'):
- return None
-
- properties = dict()
- name, vmid = item['name'], item['vmid']
-
- # get status, config and snapshots if want_facts == True
- if self.get_option('want_facts'):
- self._get_vm_status(properties, node, vmid, ittype, name)
- self._get_vm_config(properties, node, vmid, ittype, name)
- self._get_vm_snapshots(properties, node, vmid, ittype, name)
-
- # ensure the host satisfies filters
- if not self._can_add_host(name, properties):
- return None
-
- # add the host to the inventory
- self._add_host(name, properties)
- node_type_group = self._group('%s_%s' % (node, ittype))
- self.inventory.add_child(self._group('all_' + ittype), name)
- self.inventory.add_child(node_type_group, name)
- if item['status'] == 'stopped':
- self.inventory.add_child(self._group('all_stopped'), name)
- elif item['status'] == 'running':
- self.inventory.add_child(self._group('all_running'), name)
-
- return name
-
- def _populate_pool_groups(self, added_hosts):
- '''Generate groups from Proxmox resource pools, ignoring VMs and
- containers that were skipped.'''
- for pool in self._get_pools():
- poolid = pool.get('poolid')
- if not poolid:
- continue
- pool_group = self._group('pool_' + poolid)
- self.inventory.add_group(pool_group)
-
- for member in self._get_members_per_pool(poolid):
- name = member.get('name')
- if name and name in added_hosts:
- self.inventory.add_child(pool_group, name)
-
- def _populate(self):
-
- # create common groups
- self.inventory.add_group(self._group('all_lxc'))
- self.inventory.add_group(self._group('all_qemu'))
- self.inventory.add_group(self._group('all_running'))
- self.inventory.add_group(self._group('all_stopped'))
- nodes_group = self._group('nodes')
- self.inventory.add_group(nodes_group)
-
-        # gather VMs on nodes
- self._get_auth()
- hosts = []
- for node in self._get_nodes():
- if not node.get('node'):
- continue
-
- self.inventory.add_host(node['node'])
- if node['type'] == 'node':
- self.inventory.add_child(nodes_group, node['node'])
-
- if node['status'] == 'offline':
- continue
-
- # get node IP address
- if self.get_option("want_proxmox_nodes_ansible_host"):
- ip = self._get_node_ip(node['node'])
- self.inventory.set_variable(node['node'], 'ansible_host', ip)
-
- # add LXC/Qemu groups for the node
- for ittype in ('lxc', 'qemu'):
- node_type_group = self._group('%s_%s' % (node['node'], ittype))
- self.inventory.add_group(node_type_group)
-
- # get LXC containers and Qemu VMs for this node
- lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
- qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
- for ittype, item in itertools.chain(lxc_objects, qemu_objects):
- name = self._handle_item(node['node'], ittype, item)
- if name is not None:
- hosts.append(name)
-
-        # gather VMs in pools
- self._populate_pool_groups(hosts)
-
- def parse(self, inventory, loader, path, cache=True):
- if not HAS_REQUESTS:
- raise AnsibleError('This module requires Python Requests 1.1.0 or higher: '
- 'https://github.com/psf/requests.')
-
- super(InventoryModule, self).parse(inventory, loader, path)
-
- # read config from file, this sets 'options'
- self._read_config_data(path)
-
- # read options
- self.proxmox_url = self.get_option('url').rstrip('/')
- self.proxmox_user = self.get_option('user')
- self.proxmox_password = self.get_option('password')
- self.cache_key = self.get_cache_key(path)
- self.use_cache = cache and self.get_option('cache')
- self.host_filters = self.get_option('filters')
- self.group_prefix = self.get_option('group_prefix')
- self.facts_prefix = self.get_option('facts_prefix')
- self.strict = self.get_option('strict')
-
- # actually populate inventory
- self._populate()
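
A minimal standalone sketch of the name sanitisation this deleted proxmox plugin applied before registering groups and facts (the "proxmox_" prefix below is the plugin's documented default; the sample inputs are invented):

import re

def to_safe(word):
    # Drop spaces first, then turn any remaining non-alphanumeric
    # character into an underscore so the result is a valid group name.
    return re.sub(r"[^A-Za-z0-9_]", "_", word.replace(" ", ""))

def group(name, prefix="proxmox_"):
    return to_safe("%s%s" % (prefix, name.lower()))

print(group("all_running"))   # -> proxmox_all_running
print(group("pool_My Pool"))  # -> proxmox_pool_mypool
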
diff --git a/ansible_collections/community/general/plugins/inventory/scaleway.py b/ansible_collections/community/general/plugins/inventory/scaleway.py
deleted file mode 100644
index d48cc97a..00000000
--- a/ansible_collections/community/general/plugins/inventory/scaleway.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: scaleway
- author:
- - Remy Leone (@remyleone)
- short_description: Scaleway inventory source
- description:
- - Get inventory hosts from Scaleway.
- requirements:
- - PyYAML
- options:
- plugin:
- description: Token that ensures this is a source file for the 'scaleway' plugin.
- required: True
- choices: ['scaleway', 'community.general.scaleway']
- regions:
- description: Filter results on a specific Scaleway region.
- type: list
- elements: string
- default:
- - ams1
- - par1
- - par2
- - waw1
- tags:
- description: Filter results on a specific tag.
- type: list
- elements: string
- scw_profile:
- description:
- - The config profile to use in config file.
- - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
- type: string
- version_added: 4.4.0
- oauth_token:
- description:
- - Scaleway OAuth token.
-            - If not explicitly defined or in environment variables, it will try to look it up in the scaleway-cli configuration file
- (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
- - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
- env:
- # in order of precedence
- - name: SCW_TOKEN
- - name: SCW_API_KEY
- - name: SCW_OAUTH_TOKEN
- hostnames:
-        description: List of preferences about what to use as a hostname.
- type: list
- elements: string
- default:
- - public_ipv4
- choices:
- - public_ipv4
- - private_ipv4
- - public_ipv6
- - hostname
- - id
- variables:
- description: 'Set individual variables: keys are variable names and
- values are templates. Any value returned by the
- L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
- can be used.'
- type: dict
-'''
-
-EXAMPLES = r'''
-# scaleway_inventory.yml file in YAML format
-# Example command line: ansible-inventory --list -i scaleway_inventory.yml
-
-# use hostname as inventory_hostname
-# use the private IP address to connect to the host
-plugin: community.general.scaleway
-regions:
- - ams1
- - par1
-tags:
- - foobar
-hostnames:
- - hostname
-variables:
- ansible_host: private_ip
- state: state
-
-# use hostname as inventory_hostname and public IP address to connect to the host
-plugin: community.general.scaleway
-hostnames:
- - hostname
-regions:
- - par1
-variables:
- ansible_host: public_ip.address
-
-# Using static strings as variables
-plugin: community.general.scaleway
-hostnames:
- - hostname
-variables:
- ansible_host: public_ip.address
- ansible_connection: "'ssh'"
- ansible_user: "'admin'"
-'''
-
-import os
-import json
-
-try:
- import yaml
-except ImportError as exc:
- YAML_IMPORT_ERROR = exc
-else:
- YAML_IMPORT_ERROR = None
-
-from ansible.errors import AnsibleError
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.common.text.converters import to_native, to_text
-from ansible.module_utils.six import raise_from
-
-import ansible.module_utils.six.moves.urllib.parse as urllib_parse
-
-
-def _fetch_information(token, url):
- results = []
- paginated_url = url
- while True:
- try:
- response = open_url(paginated_url,
- headers={'X-Auth-Token': token,
- 'Content-type': 'application/json'})
- except Exception as e:
- raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
- try:
- raw_json = json.loads(to_text(response.read()))
- except ValueError:
- raise AnsibleError("Incorrect JSON payload")
-
- try:
- results.extend(raw_json["servers"])
- except KeyError:
- raise AnsibleError("Incorrect format from the Scaleway API response")
-
- link = response.headers['Link']
- if not link:
- return results
- relations = parse_pagination_link(link)
- if 'next' not in relations:
- return results
- paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
-
-
-def _build_server_url(api_endpoint):
- return "/".join([api_endpoint, "servers"])
-
-
-def extract_public_ipv4(server_info):
- try:
- return server_info["public_ip"]["address"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_private_ipv4(server_info):
- try:
- return server_info["private_ip"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_hostname(server_info):
- try:
- return server_info["hostname"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_server_id(server_info):
- try:
- return server_info["id"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_public_ipv6(server_info):
- try:
- return server_info["ipv6"]["address"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_tags(server_info):
- try:
- return server_info["tags"]
- except (KeyError, TypeError):
- return None
-
-
-def extract_zone(server_info):
- try:
- return server_info["location"]["zone_id"]
- except (KeyError, TypeError):
- return None
-
-
-extractors = {
- "public_ipv4": extract_public_ipv4,
- "private_ipv4": extract_private_ipv4,
- "public_ipv6": extract_public_ipv6,
- "hostname": extract_hostname,
- "id": extract_server_id
-}
-
-
-class InventoryModule(BaseInventoryPlugin, Constructable):
- NAME = 'community.general.scaleway'
-
- def _fill_host_variables(self, host, server_info):
- targeted_attributes = (
- "arch",
- "commercial_type",
- "id",
- "organization",
- "state",
- "hostname",
- )
- for attribute in targeted_attributes:
- self.inventory.set_variable(host, attribute, server_info[attribute])
-
- self.inventory.set_variable(host, "tags", server_info["tags"])
-
- if extract_public_ipv6(server_info=server_info):
- self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
-
- if extract_public_ipv4(server_info=server_info):
- self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
-
- if extract_private_ipv4(server_info=server_info):
- self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
-
- def _get_zones(self, config_zones):
- return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
-
- def match_groups(self, server_info, tags):
- server_zone = extract_zone(server_info=server_info)
- server_tags = extract_tags(server_info=server_info)
-
- # If a server does not have a zone, it means it is archived
- if server_zone is None:
- return set()
-
- # If no filtering is defined, all tags are valid groups
- if tags is None:
- return set(server_tags).union((server_zone,))
-
- matching_tags = set(server_tags).intersection(tags)
-
- if not matching_tags:
- return set()
- return matching_tags.union((server_zone,))
-
- def _filter_host(self, host_infos, hostname_preferences):
-
- for pref in hostname_preferences:
- if extractors[pref](host_infos):
- return extractors[pref](host_infos)
-
- return None
-
- def do_zone_inventory(self, zone, token, tags, hostname_preferences):
- self.inventory.add_group(zone)
- zone_info = SCALEWAY_LOCATION[zone]
-
- url = _build_server_url(zone_info["api_endpoint"])
- raw_zone_hosts_infos = _fetch_information(url=url, token=token)
-
- for host_infos in raw_zone_hosts_infos:
-
- hostname = self._filter_host(host_infos=host_infos,
- hostname_preferences=hostname_preferences)
-
-            # No suitable hostname was found in the attributes, so the host won't be in the inventory
- if not hostname:
- continue
-
- groups = self.match_groups(host_infos, tags)
-
- for group in groups:
- self.inventory.add_group(group=group)
- self.inventory.add_host(group=group, host=hostname)
- self._fill_host_variables(host=hostname, server_info=host_infos)
-
- # Composed variables
- self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
-
- def get_oauth_token(self):
- oauth_token = self.get_option('oauth_token')
-
- if 'SCW_CONFIG_PATH' in os.environ:
- scw_config_path = os.getenv('SCW_CONFIG_PATH')
- elif 'XDG_CONFIG_HOME' in os.environ:
- scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml')
- else:
- scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml')
-
- if not oauth_token and os.path.exists(scw_config_path):
- with open(scw_config_path) as fh:
- scw_config = yaml.safe_load(fh)
- ansible_profile = self.get_option('scw_profile')
-
- if ansible_profile:
- active_profile = ansible_profile
- else:
- active_profile = scw_config.get('active_profile', 'default')
-
- if active_profile == 'default':
- oauth_token = scw_config.get('secret_key')
- else:
- oauth_token = scw_config['profiles'][active_profile].get('secret_key')
-
- return oauth_token
-
- def parse(self, inventory, loader, path, cache=True):
- if YAML_IMPORT_ERROR:
- raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
- super(InventoryModule, self).parse(inventory, loader, path)
- self._read_config_data(path=path)
-
- config_zones = self.get_option("regions")
- tags = self.get_option("tags")
- token = self.get_oauth_token()
- if not token:
- raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
- hostname_preference = self.get_option("hostnames")
-
- for zone in self._get_zones(config_zones):
- self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
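
A hedged sketch of the hostname-preference logic the deleted scaleway plugin used: walk the configured preferences in order and return the first extractor that yields a value (the two extractors and the sample server dict below are trimmed-down illustrations, not the full set):

def extract_public_ipv4(info):
    try:
        return info["public_ip"]["address"]
    except (KeyError, TypeError):
        return None

def extract_hostname(info):
    try:
        return info["hostname"]
    except (KeyError, TypeError):
        return None

extractors = {"public_ipv4": extract_public_ipv4, "hostname": extract_hostname}

def filter_host(info, preferences):
    # First preference that resolves to a truthy value wins.
    for pref in preferences:
        value = extractors[pref](info)
        if value:
            return value
    return None

server = {"hostname": "web-1", "public_ip": None}
print(filter_host(server, ["public_ipv4", "hostname"]))  # -> web-1
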
diff --git a/ansible_collections/community/general/plugins/lookup/cartesian.py b/ansible_collections/community/general/plugins/lookup/cartesian.py
deleted file mode 100644
index 98043eba..00000000
--- a/ansible_collections/community/general/plugins/lookup/cartesian.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2013, Bradley Young
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: cartesian
- short_description: returns the cartesian product of lists
- description:
- - Takes the input lists and returns a list that represents the product of the input lists.
-    - It is clearer with an example: it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
- You can see the exact syntax in the examples section.
- options:
- _raw:
- description:
- - a set of lists
- required: True
-'''
-
-EXAMPLES = """
-- name: Example of the change in the description
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}"
-
-- name: loops over the cartesian product of the supplied lists
- ansible.builtin.debug:
- msg: "{{item}}"
- with_community.general.cartesian:
- - "{{list1}}"
- - "{{list2}}"
- - [1,2,3,4,5,6]
-"""
-
-RETURN = """
- _list:
- description:
- - list of lists composed of elements of the input lists
- type: list
- elements: list
-"""
-
-from itertools import product
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.listify import listify_lookup_plugin_terms
-
-
-class LookupModule(LookupBase):
- """
- Create the cartesian product of lists
- """
-
- def _lookup_variables(self, terms):
- """
- Turn this:
- terms == ["1,2,3", "a,b"]
- into this:
- terms == [[1,2,3], [a, b]]
- """
- results = []
- for x in terms:
- intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
- results.append(intermediate)
- return results
-
- def run(self, terms, variables=None, **kwargs):
-
- terms = self._lookup_variables(terms)
-
- my_list = terms[:]
- if len(my_list) == 0:
- raise AnsibleError("with_cartesian requires at least one element in each list")
-
- return [self._flatten(x) for x in product(*my_list)]
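
Stripped of the Ansible plumbing, the deleted cartesian lookup boils down to itertools.product over the input lists, with each resulting tuple flattened into a plain list:

from itertools import product

lists = [[1, 2, 3], ["a", "b"]]
print([list(combo) for combo in product(*lists)])
# -> [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [3, 'a'], [3, 'b']]
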
diff --git a/ansible_collections/community/general/plugins/lookup/consul_kv.py b/ansible_collections/community/general/plugins/lookup/consul_kv.py
deleted file mode 100644
index 3ad03bfe..00000000
--- a/ansible_collections/community/general/plugins/lookup/consul_kv.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Steve Gargan
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: consul_kv
- short_description: Fetch metadata from a Consul key value store.
- description:
-    - Look up metadata for a playbook from the key value store in a Consul cluster.
-      Values can be easily set in the kv store with simple REST commands
- - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata)
- requirements:
- - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
- options:
- _raw:
- description: List of key(s) to retrieve.
- type: list
- elements: string
- recurse:
- type: boolean
- description: If true, will retrieve all the values that have the given key as prefix.
- default: False
- index:
- description:
- - If the key has a value with the specified index then this is returned allowing access to historical values.
- datacenter:
- description:
- - Retrieve the key from a consul datacenter other than the default for the consul host.
- token:
- description: The acl token to allow access to restricted values.
- host:
- default: localhost
- description:
- - The target to connect to, must be a resolvable address.
- Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
- - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: host
- port:
- description:
- - The port of the target host to connect to.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
- default: 8500
- scheme:
- default: http
- description:
- - Whether to use http or https.
- - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
- validate_certs:
- default: True
- description: Whether to verify the ssl connection or not.
- env:
- - name: ANSIBLE_CONSUL_VALIDATE_CERTS
- ini:
- - section: lookup_consul
- key: validate_certs
- client_cert:
- description: The client cert to verify the ssl connection.
- env:
- - name: ANSIBLE_CONSUL_CLIENT_CERT
- ini:
- - section: lookup_consul
- key: client_cert
- url:
- description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
- type: str
- version_added: 1.0.0
- env:
- - name: ANSIBLE_CONSUL_URL
- ini:
- - section: lookup_consul
- key: url
-'''
-
-EXAMPLES = """
- - ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to/retrieve'
-
-  - name: Parameters can be provided after the key to be more specific about what to retrieve
- ansible.builtin.debug:
- msg: 'key contains {{item}}'
- with_community.general.consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
-
- - name: retrieving a KV from a remote cluster on non default port
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
-"""
-
-RETURN = """
- _raw:
- description:
- - Value(s) stored in consul.
- type: dict
-"""
-
-import os
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.errors import AnsibleError, AnsibleAssertionError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.common.text.converters import to_text
-
-try:
- import consul
-
- HAS_CONSUL = True
-except ImportError as e:
- HAS_CONSUL = False
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
-
- if not HAS_CONSUL:
- raise AnsibleError(
- 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
-
- # get options
- self.set_options(direct=kwargs)
-
- scheme = self.get_option('scheme')
- host = self.get_option('host')
- port = self.get_option('port')
- url = self.get_option('url')
- if url is not None:
- u = urlparse(url)
- if u.scheme:
- scheme = u.scheme
- host = u.hostname
- if u.port is not None:
- port = u.port
-
- validate_certs = self.get_option('validate_certs')
- client_cert = self.get_option('client_cert')
-
- values = []
- try:
- for term in terms:
- params = self.parse_params(term)
- consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert)
-
- results = consul_api.kv.get(params['key'],
- token=params['token'],
- index=params['index'],
- recurse=params['recurse'],
- dc=params['datacenter'])
- if results[1]:
- # responds with a single or list of result maps
- if isinstance(results[1], list):
- for r in results[1]:
- values.append(to_text(r['Value']))
- else:
- values.append(to_text(results[1]['Value']))
- except Exception as e:
- raise AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
-
- return values
-
- def parse_params(self, term):
- params = term.split(' ')
-
- paramvals = {
- 'key': params[0],
- 'token': self.get_option('token'),
- 'recurse': self.get_option('recurse'),
- 'index': self.get_option('index'),
- 'datacenter': self.get_option('datacenter')
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
-                if param:
- name, value = param.split('=')
- if name not in paramvals:
- raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
- paramvals[name] = value
- except (ValueError, AssertionError) as e:
- raise AnsibleError(e)
-
- return paramvals
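
A sketch of the term format the deleted consul_kv lookup accepted: the key comes first, followed by optional space-separated name=value overrides (the defaults dict below stands in for the plugin options):

def parse_params(term, defaults):
    parts = term.split(" ")
    params = dict(defaults, key=parts[0])
    for part in parts[1:]:
        if not part:
            continue
        name, value = part.split("=")
        if name not in params:
            raise ValueError("%s is not a valid consul lookup parameter" % name)
        params[name] = value
    return params

defaults = {"token": None, "recurse": False, "index": None, "datacenter": None}
print(parse_params("key/to recurse=true", defaults)["recurse"])  # -> true
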
diff --git a/ansible_collections/community/general/plugins/lookup/credstash.py b/ansible_collections/community/general/plugins/lookup/credstash.py
deleted file mode 100644
index 143c66c1..00000000
--- a/ansible_collections/community/general/plugins/lookup/credstash.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Ensighten
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author: Unknown (!UNKNOWN)
- name: credstash
- short_description: retrieve secrets from Credstash on AWS
- requirements:
- - credstash (python library)
- description:
- - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
- options:
- _terms:
-      description: term or list of terms to look up in the credential store
- type: list
- elements: string
- required: true
- table:
- description: name of the credstash table to query
- default: 'credential-store'
- version:
- description: Credstash version
- region:
- description: AWS region
- profile_name:
- description: AWS profile to use for authentication
- env:
- - name: AWS_PROFILE
- aws_access_key_id:
- description: AWS access key ID
- env:
- - name: AWS_ACCESS_KEY_ID
- aws_secret_access_key:
- description: AWS access key
- env:
- - name: AWS_SECRET_ACCESS_KEY
- aws_session_token:
- description: AWS session token
- env:
- - name: AWS_SESSION_TOKEN
-'''
-
-EXAMPLES = """
-- name: first use credstash to store your secrets
- ansible.builtin.shell: credstash put my-github-password secure123
-
-- name: "Test credstash lookup plugin -- get my github password"
- ansible.builtin.debug:
- msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}"
-
-- name: "Test credstash lookup plugin -- get my other password from us-west-1"
- ansible.builtin.debug:
- msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}"
-
-- name: "Test credstash lookup plugin -- get the company's github password"
- ansible.builtin.debug:
- msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}"
-
-- name: Example play using the 'context' feature
- hosts: localhost
- vars:
- context:
- app: my_app
- environment: production
- tasks:
-
- - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
-
- - name: "Test credstash lookup plugin -- get the password with a context defined here"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
-"""
-
-RETURN = """
- _raw:
- description:
- - Value(s) stored in Credstash.
- type: str
-"""
-
-import os
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-
-CREDSTASH_INSTALLED = False
-
-try:
- import credstash
- CREDSTASH_INSTALLED = True
-except ImportError:
- CREDSTASH_INSTALLED = False
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
-
- if not CREDSTASH_INSTALLED:
- raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
-
- ret = []
- for term in terms:
- try:
- version = kwargs.pop('version', '')
- region = kwargs.pop('region', None)
- table = kwargs.pop('table', 'credential-store')
- profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
- aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
- aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
- aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
- kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id,
- 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token}
- val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
- except credstash.ItemNotFound:
- raise AnsibleError('Key {0} not found'.format(term))
- except Exception as e:
- raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
- ret.append(val)
-
- return ret
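
The deleted lookup ultimately delegated to credstash.getSecret(); a minimal sketch of the direct library call it wrapped (assumes the credstash package is installed and AWS credentials are available in the environment; the secret name is a placeholder):

import credstash

# Fetch the latest version of a secret from the default DynamoDB table.
secret = credstash.getSecret("my-github-password", table="credential-store")
print(secret)
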
diff --git a/ansible_collections/community/general/plugins/lookup/dig.py b/ansible_collections/community/general/plugins/lookup/dig.py
deleted file mode 100644
index 19ded61d..00000000
--- a/ansible_collections/community/general/plugins/lookup/dig.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2015, Jan-Piet Mens
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: dig
- author: Jan-Piet Mens (@jpmens)
- short_description: query DNS using the dnspython library
- requirements:
- - dnspython (python library, http://www.dnspython.org/)
- description:
- - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
-      It is possible to look up any DNS record in this manner.
-    - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
-      It is also possible to explicitly specify the DNS server(s) to use for lookups.
-    - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN.
-    - In addition to the (default) A record, it is also possible to specify a different record type that should be queried.
-      This can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
- - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
- In such cases you may want to pass option wantlist=True to the plugin, which will result in the record values being returned as a list
- over which you can iterate later on.
- - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
- It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
-      This needs to be passed in as an additional parameter to the lookup.
- options:
- _terms:
- description: domain(s) to query
- qtype:
- description: record type to query
- default: 'A'
- choices: [A, ALL, AAAA, CNAME, DNAME, DLV, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
- flat:
-      description: If 0, each record is returned as a dictionary; otherwise, as a string.
- default: 1
- retry_servfail:
- description: Retry a nameserver if it returns SERVFAIL.
- default: false
- type: bool
- version_added: 3.6.0
- notes:
-    - ALL is not a record type per se; rather, the listed fields are available for any record results you retrieve in the form of a dictionary.
- - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
- - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
- Syntax for specifying the record type is shown in the examples below.
- - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
-'''
-
-EXAMPLES = """
-- name: Simple A record (IPV4 address) lookup for example.com
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.dig', 'example.com.')}}"
-
-- name: "The TXT record for example.org."
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.dig', 'example.org.', 'qtype=TXT') }}"
-
-- name: "The TXT record for example.org, alternative syntax."
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}"
-
-- name: use in a loop
- ansible.builtin.debug:
- msg: "MX record for gmail.com {{ item }}"
- with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=True) }}"
-
-- ansible.builtin.debug:
- msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
-- ansible.builtin.debug:
- msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
-- ansible.builtin.debug:
- msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', 'qtype=PTR') }}"
-- ansible.builtin.debug:
- msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
-
-- ansible.builtin.debug:
- msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
- with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
-
-- name: Retry nameservers that return SERVFAIL
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.dig', 'example.org./A', 'retry_servfail=True') }}"
-"""
-
-RETURN = """
- _list:
- description:
-      - List of composed strings or dictionaries with key and value.
-        If a dictionary, the fields below show the keys returned for each query type.
- type: list
- elements: raw
- contains:
- ALL:
- description:
- - owner, ttl, type
- A:
- description:
- - address
- AAAA:
- description:
- - address
- CNAME:
- description:
- - target
- DNAME:
- description:
- - target
- DLV:
- description:
- - algorithm, digest_type, key_tag, digest
- DNSKEY:
- description:
- - flags, algorithm, protocol, key
- DS:
- description:
- - algorithm, digest_type, key_tag, digest
- HINFO:
- description:
- - cpu, os
- LOC:
- description:
- - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
- MX:
- description:
- - preference, exchange
- NAPTR:
- description:
- - order, preference, flags, service, regexp, replacement
- NS:
- description:
- - target
- NSEC3PARAM:
- description:
- - algorithm, flags, iterations, salt
- PTR:
- description:
- - target
- RP:
- description:
- - mbox, txt
- SOA:
- description:
- - mname, rname, serial, refresh, retry, expire, minimum
- SPF:
- description:
- - strings
- SRV:
- description:
- - priority, weight, port, target
- SSHFP:
- description:
- - algorithm, fp_type, fingerprint
- TLSA:
- description:
- - usage, selector, mtype, cert
- TXT:
- description:
- - strings
-"""
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils.common.text.converters import to_native
-import socket
-
-try:
- import dns.exception
- import dns.name
- import dns.resolver
- import dns.reversename
- import dns.rdataclass
- from dns.rdatatype import (A, AAAA, CNAME, DLV, DNAME, DNSKEY, DS, HINFO, LOC,
- MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
- HAVE_DNS = True
-except ImportError:
- HAVE_DNS = False
-
-
-def make_rdata_dict(rdata):
- ''' While the 'dig' lookup plugin supports anything which dnspython supports
- out of the box, the following supported_types list describes which
- DNS query types we can convert to a dict.
-
- Note: adding support for RRSIG is hard work. :)
- '''
- supported_types = {
- A: ['address'],
- AAAA: ['address'],
- CNAME: ['target'],
- DNAME: ['target'],
- DLV: ['algorithm', 'digest_type', 'key_tag', 'digest'],
- DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
- DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
- HINFO: ['cpu', 'os'],
- LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
- MX: ['preference', 'exchange'],
- NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
- NS: ['target'],
- NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
- PTR: ['target'],
- RP: ['mbox', 'txt'],
- # RRSIG: ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
- SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
- SPF: ['strings'],
- SRV: ['priority', 'weight', 'port', 'target'],
- SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
- TLSA: ['usage', 'selector', 'mtype', 'cert'],
- TXT: ['strings'],
- }
-
- rd = {}
-
- if rdata.rdtype in supported_types:
- fields = supported_types[rdata.rdtype]
- for f in fields:
- val = rdata.__getattribute__(f)
-
- if isinstance(val, dns.name.Name):
- val = dns.name.Name.to_text(val)
-
- if rdata.rdtype == DLV and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DS and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DNSKEY and f == 'key':
- val = dns.rdata._base64ify(rdata.key).replace(' ', '')
- if rdata.rdtype == NSEC3PARAM and f == 'salt':
- val = dns.rdata._hexify(rdata.salt).replace(' ', '')
- if rdata.rdtype == SSHFP and f == 'fingerprint':
- val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
- if rdata.rdtype == TLSA and f == 'cert':
- val = dns.rdata._hexify(rdata.cert).replace(' ', '')
-
- rd[f] = val
-
- return rd
-
-
-# ==============================================================
-# dig: Lookup DNS records
-#
-# --------------------------------------------------------------
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
-
- '''
- terms contains a string with things to `dig' for. We support the
- following formats:
- example.com # A record
- example.com qtype=A # same
- example.com/TXT # specific qtype
- example.com qtype=txt # same
- 192.0.2.23/PTR # reverse PTR
- ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
- example.net/AAAA @nameserver # query specified server
- ^^^ can be comma-sep list of names/addresses
-
- ... flat=0 # returns a dict; default is 1 == string
- '''
-
- if HAVE_DNS is False:
- raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
-
- # Create Resolver object so that we can set NS if necessary
- myres = dns.resolver.Resolver(configure=True)
- edns_size = 4096
- myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
-
- domain = None
- qtype = 'A'
- flat = True
- rdclass = dns.rdataclass.from_text('IN')
-
- for t in terms:
- if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok.
- nsset = t[1:].split(',')
- for ns in nsset:
- nameservers = []
- # Check if we have a valid IP address. If so, use that, otherwise
- # try to resolve name to address using system's resolver. If that
- # fails we bail out.
- try:
- socket.inet_aton(ns)
- nameservers.append(ns)
- except Exception:
- try:
- nsaddr = dns.resolver.query(ns)[0].address
- nameservers.append(nsaddr)
- except Exception as e:
- raise AnsibleError("dns lookup NS: %s" % to_native(e))
- myres.nameservers = nameservers
- continue
-            if '=' in t:
-                try:
-                    opt, arg = t.split('=', 1)
-                except Exception:
-                    continue
-
- if opt == 'qtype':
- qtype = arg.upper()
- elif opt == 'flat':
- flat = int(arg)
- elif opt == 'class':
- try:
- rdclass = dns.rdataclass.from_text(arg)
- except Exception as e:
- raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
- elif opt == 'retry_servfail':
- myres.retry_servfail = bool(arg)
-
- continue
-
- if '/' in t:
- try:
- domain, qtype = t.split('/')
- except Exception:
- domain = t
- else:
- domain = t
-
- # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
-
- ret = []
-
- if qtype.upper() == 'PTR':
- try:
- n = dns.reversename.from_address(domain)
- domain = n.to_text()
- except dns.exception.SyntaxError:
- pass
- except Exception as e:
- raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
-
- try:
- answers = myres.query(domain, qtype, rdclass=rdclass)
- for rdata in answers:
- s = rdata.to_text()
- if qtype.upper() == 'TXT':
- s = s[1:-1] # Strip outside quotes on TXT rdata
-
- if flat:
- ret.append(s)
- else:
- try:
- rd = make_rdata_dict(rdata)
- rd['owner'] = answers.canonical_name.to_text()
- rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
- rd['ttl'] = answers.rrset.ttl
- rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
-
- ret.append(rd)
- except Exception as e:
- ret.append(str(e))
-
- except dns.resolver.NXDOMAIN:
- ret.append('NXDOMAIN')
- except dns.resolver.NoAnswer:
- ret.append("")
- except dns.resolver.Timeout:
- ret.append('')
- except dns.exception.DNSException as e:
- raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
-
- return ret
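
Reduced to its core, the deleted dig lookup performed a dnspython resolver query; a minimal sketch under that assumption (dnspython 2.x spells the method resolve(), while the plugin above still used the older query()):

import dns.resolver

resolver = dns.resolver.Resolver(configure=True)
answers = resolver.resolve("example.com", "A")
print([rdata.to_text() for rdata in answers])
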
diff --git a/ansible_collections/community/general/plugins/lookup/etcd3.py b/ansible_collections/community/general/plugins/lookup/etcd3.py
deleted file mode 100644
index a34fae7b..00000000
--- a/ansible_collections/community/general/plugins/lookup/etcd3.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2020, SCC France, Eric Belhomme
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author:
- - Eric Belhomme (@eric-belhomme)
- version_added: '0.2.0'
- name: etcd3
- short_description: Get key values from etcd3 server
- description:
- - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
-    - Tries to reuse M(community.general.etcd3) options for connection parameters, but adds support for some C(ETCDCTL_*) environment variables.
- - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
-
- options:
- _terms:
- description:
- - The list of keys (or key prefixes) to look up on the etcd3 server.
- type: list
- elements: str
- required: True
- prefix:
- description:
- - Look for key or prefix key.
- type: bool
- default: False
- endpoints:
- description:
- - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
-          Specify the etcd3 connection in URL form, for example C(https://hostname:2379), or in C(<host>:<port>) form.
- - The C(host) part is overwritten by I(host) option, if defined.
- - The C(port) part is overwritten by I(port) option, if defined.
- env:
- - name: ETCDCTL_ENDPOINTS
- default: '127.0.0.1:2379'
- type: str
- host:
- description:
- - etcd3 listening client host.
- - Takes precedence over I(endpoints).
- type: str
- port:
- description:
- - etcd3 listening client port.
- - Takes precedence over I(endpoints).
- type: int
- ca_cert:
- description:
- - etcd3 CA authority.
- env:
- - name: ETCDCTL_CACERT
- type: str
- cert_cert:
- description:
- - etcd3 client certificate.
- env:
- - name: ETCDCTL_CERT
- type: str
- cert_key:
- description:
- - etcd3 client private key.
- env:
- - name: ETCDCTL_KEY
- type: str
- timeout:
- description:
- - Client timeout.
- default: 60
- env:
- - name: ETCDCTL_DIAL_TIMEOUT
- type: int
- user:
- description:
- - Authenticated user name.
- env:
- - name: ETCDCTL_USER
- type: str
- password:
- description:
- - Authenticated user password.
- env:
- - name: ETCDCTL_PASSWORD
- type: str
-
- notes:
-    - I(host) and I(port) options take precedence over the I(endpoints) option.
-    - The recommended way to connect to the etcd3 server is using the C(ETCDCTL_ENDPOINTS)
-      environment variable and keeping I(endpoints), I(host), and I(port) unused.
- seealso:
- - module: community.general.etcd3
- - ref: ansible_collections.community.general.etcd_lookup
- description: The etcd v2 lookup.
-
- requirements:
- - "etcd3 >= 0.10"
-'''
-
-EXAMPLES = '''
-- name: "a value from a locally running etcd"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
-
-- name: "values from multiple folders on a locally running etcd"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
-
-- name: "look for a key prefix"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
-
-- name: "connect to etcd3 with a client certificate"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
-'''
-
-RETURN = '''
- _raw:
- description:
- - List of keys and associated values.
- type: list
- elements: dict
- contains:
- key:
- description: The element's key.
- type: str
- value:
- description: The element's value.
- type: str
-'''
-
-import re
-
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.display import Display
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-from ansible.errors import AnsibleError, AnsibleLookupError
-
-try:
- import etcd3
- HAS_ETCD = True
-except ImportError:
- HAS_ETCD = False
-
-display = Display()
-
-etcd3_cnx_opts = (
- 'host',
- 'port',
- 'ca_cert',
- 'cert_key',
- 'cert_cert',
- 'timeout',
- 'user',
- 'password',
- # 'grpc_options' Etcd3Client() option currently not supported by lookup module (maybe in future ?)
-)
-
-
-def etcd3_client(client_params):
- try:
- etcd = etcd3.client(**client_params)
- etcd.status()
- except Exception as exp:
- raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
- return etcd
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables, **kwargs):
-
- self.set_options(var_options=variables, direct=kwargs)
-
- if not HAS_ETCD:
- display.error(missing_required_lib('etcd3'))
- return None
-
- # create the etcd3 connection parameters dict to pass to etcd3 class
- client_params = {}
-
-        # The etcd3 class expects host and port as connection parameters, so the
-        # endpoints value must be mangled a bit to fit this scheme; a regex
-        # extracts the server and port.
- match = re.compile(
-            r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
- ).match(self.get_option('endpoints'))
- if match:
- if match.group('host'):
- client_params['host'] = match.group('host')
- if match.group('port'):
- client_params['port'] = match.group('port')
-
- for opt in etcd3_cnx_opts:
- if self.get_option(opt):
- client_params[opt] = self.get_option(opt)
-
- cnx_log = dict(client_params)
- if 'password' in cnx_log:
-            cnx_log['password'] = '<redacted>'
- display.verbose("etcd3 connection parameters: %s" % cnx_log)
-
- # connect to etcd3 server
- etcd = etcd3_client(client_params)
-
- ret = []
- # we can pass many keys to lookup
- for term in terms:
- if self.get_option('prefix'):
- try:
- for val, meta in etcd.get_prefix(term):
- if val and meta:
- ret.append({'key': to_native(meta.key), 'value': to_native(val)})
- except Exception as exp:
-                    display.warning('Caught exception during etcd3.get_prefix: %s' % (to_native(exp)))
- else:
- try:
- val, meta = etcd.get(term)
- if val and meta:
- ret.append({'key': to_native(meta.key), 'value': to_native(val)})
- except Exception as exp:
-                    display.warning('Caught exception during etcd3.get: %s' % (to_native(exp)))
- return ret
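
A quick check of the endpoint-mangling regex above: it splits an ETCDCTL_ENDPOINTS-style value into the host/port pair the etcd3 client expects (the endpoint string is an invented example):

import re

ENDPOINT_RE = re.compile(
    r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
)

match = ENDPOINT_RE.match("https://etcd.example.com:2379")
print(match.group("host"), match.group("port"))  # -> etcd.example.com 2379
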
diff --git a/ansible_collections/community/general/plugins/lookup/flattened.py b/ansible_collections/community/general/plugins/lookup/flattened.py
deleted file mode 100644
index edc546ff..00000000
--- a/ansible_collections/community/general/plugins/lookup/flattened.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2013, Serge van Ginderachter
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: flattened
- author: Serge van Ginderachter (!UNKNOWN)
- short_description: return single list completely flattened
- description:
- - given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
- options:
- _terms:
- description: lists to flatten
- required: True
- notes:
- - unlike 'items' which only flattens 1 level, this plugin will continue to flatten until it cannot find lists anymore.
- - aka highlander plugin, there can only be one (list).
-'''
-
-EXAMPLES = """
-- name: "'unnest' all elements into single list"
- ansible.builtin.debug:
- msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
-"""
-
-RETURN = """
- _raw:
- description:
- - flattened list
- type: list
-"""
-from ansible.errors import AnsibleError
-from ansible.module_utils.six import string_types
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.listify import listify_lookup_plugin_terms
-
-
-class LookupModule(LookupBase):
-
- def _check_list_of_one_list(self, term):
- # make sure term is not a list of one (list of one..) item
- # return the final non list item if so
-
- if isinstance(term, list) and len(term) == 1:
- term = term[0]
- if isinstance(term, list):
- term = self._check_list_of_one_list(term)
-
- return term
-
- def _do_flatten(self, terms, variables):
-
- ret = []
- for term in terms:
- term = self._check_list_of_one_list(term)
-
-            if term == 'None' or term == 'null':
-                # skip undefined items and keep processing the remaining terms
-                continue
-
- if isinstance(term, string_types):
- # convert a variable to a list
- term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
- # but avoid converting a plain string to a list of one string
- if term2 != [term]:
- term = term2
-
- if isinstance(term, list):
- # if it's a list, check recursively for items that are a list
- term = self._do_flatten(term, variables)
- ret.extend(term)
- else:
- ret.append(term)
-
- return ret
-
- def run(self, terms, variables, **kwargs):
-
- if not isinstance(terms, list):
- raise AnsibleError("with_flattened expects a list")
-
- return self._do_flatten(terms, variables)
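
The recursive idea behind the deleted flattened lookup, stripped of the templating and undefined-item handling: keep descending into lists until only scalars remain:

def flatten(terms):
    result = []
    for term in terms:
        if isinstance(term, list):
            result.extend(flatten(term))  # recurse into nested lists
        else:
            result.append(term)
    return result

print(flatten([[1, 2, 3, [5, 6]], ["a", "b", "c"]]))
# -> [1, 2, 3, 5, 6, 'a', 'b', 'c']
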
diff --git a/ansible_collections/community/general/plugins/lookup/hiera.py b/ansible_collections/community/general/plugins/lookup/hiera.py
deleted file mode 100644
index 5b440469..00000000
--- a/ansible_collections/community/general/plugins/lookup/hiera.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, Juan Manuel Parrilla
-# (c) 2012-17 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- author:
- - Juan Manuel Parrilla (@jparrill)
- name: hiera
- short_description: get info from hiera data
- requirements:
- - hiera (command line utility)
- description:
-    - Retrieves data from a Puppetmaster node using Hiera as an ENC
- options:
- _hiera_key:
- description:
- - The list of keys to lookup on the Puppetmaster
- type: list
- elements: string
- required: True
- _bin_file:
- description:
- - Binary file to execute Hiera
- default: '/usr/bin/hiera'
- env:
- - name: ANSIBLE_HIERA_BIN
- _hierarchy_file:
- description:
- - File that describes the hierarchy of Hiera
- default: '/etc/hiera.yaml'
- env:
- - name: ANSIBLE_HIERA_CFG
-# FIXME: incomplete options .. _terms? environment/fqdn?
-'''
-
-EXAMPLES = """
-# All these examples depend on a hiera.yml that describes the hierarchy
-
-- name: "a value from Hiera 'DB'"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.hiera', 'foo') }}"
-
-- name: "a value from a Hiera 'DB' on other environment"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
-
-- name: "a value from a Hiera 'DB' for a concrete node"
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
-"""
-
-RETURN = """
- _raw:
- description:
- - a value associated with input key
- type: list
- elements: str
-"""
-
-import os
-
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.cmd_functions import run_cmd
-from ansible.module_utils.common.text.converters import to_text
-
-ANSIBLE_HIERA_CFG = os.getenv('ANSIBLE_HIERA_CFG', '/etc/hiera.yaml')
-ANSIBLE_HIERA_BIN = os.getenv('ANSIBLE_HIERA_BIN', '/usr/bin/hiera')
-
-
-class Hiera(object):
-    def get(self, hiera_key):
-        # Run the hiera CLI against the configured hierarchy; only the first
-        # term is passed through.
-        rc, output, err = run_cmd("{0} -c {1} {2}".format(
-            ANSIBLE_HIERA_BIN, ANSIBLE_HIERA_CFG, hiera_key[0]))
-
-        return to_text(output.strip())
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables=''):
- hiera = Hiera()
- ret = [hiera.get(terms)]
- return ret
diff --git a/ansible_collections/community/general/plugins/lookup/keyring.py b/ansible_collections/community/general/plugins/lookup/keyring.py
deleted file mode 100644
index 73f9c5f4..00000000
--- a/ansible_collections/community/general/plugins/lookup/keyring.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2016, Samuel Boucher
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: keyring
- author:
- - Samuel Boucher (!UNKNOWN)
- requirements:
- - keyring (python library)
- short_description: grab secrets from the OS keyring
- description:
- - Allows you to access data stored in the OS provided keyring/keychain.
-'''
-
-EXAMPLES = """
-- name: output secrets to screen (BAD IDEA)
- ansible.builtin.debug:
- msg: "Password: {{item}}"
- with_community.general.keyring:
- - 'servicename username'
-
-- name: access mysql with password from keyring
- mysql_db: login_password={{lookup('community.general.keyring','mysql joe')}} login_user=joe
-"""
-
-RETURN = """
- _raw:
- description: Secrets stored.
- type: list
- elements: str
-"""
-
-HAS_KEYRING = True
-
-from ansible.errors import AnsibleError
-from ansible.utils.display import Display
-
-try:
- import keyring
-except ImportError:
- HAS_KEYRING = False
-
-from ansible.plugins.lookup import LookupBase
-
-display = Display()
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, **kwargs):
- if not HAS_KEYRING:
- raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
-
- display.vvvv(u"keyring: %s" % keyring.get_keyring())
- ret = []
- for term in terms:
- (servicename, username) = (term.split()[0], term.split()[1])
- display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
- password = keyring.get_password(servicename, username)
- if password is None:
- raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
- ret.append(password.rstrip())
- return ret
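
The direct equivalent of the deleted keyring lookup's core call, for reference (assumes the keyring package is installed and the OS keychain is unlocked; the service and user names are placeholders):

import keyring

password = keyring.get_password("servicename", "username")
print("found" if password is not None else "not found")
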
diff --git a/ansible_collections/community/general/plugins/lookup/lastpass.py b/ansible_collections/community/general/plugins/lookup/lastpass.py
deleted file mode 100644
index 920d3317..00000000
--- a/ansible_collections/community/general/plugins/lookup/lastpass.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2016, Andrew Zenk
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: lastpass
- author:
- - Andrew Zenk (!UNKNOWN)
- requirements:
- - lpass (command line utility)
- - must have already logged into lastpass
- short_description: fetch data from lastpass
- description:
- - use the lpass command line utility to fetch specific fields from lastpass
- options:
- _terms:
- description: key from which you want to retrieve the field
- required: True
- field:
- description: field to return from lastpass
- default: 'password'
-'''
-
-EXAMPLES = """
-- name: get 'custom_field' from lastpass entry 'entry-name'
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
-"""
-
-RETURN = """
- _raw:
- description: secrets stored
- type: list
- elements: str
-"""
-
-from subprocess import Popen, PIPE
-
-from ansible.errors import AnsibleError
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-from ansible.plugins.lookup import LookupBase
-
-
-class LPassException(AnsibleError):
- pass
-
-
-class LPass(object):
-
- def __init__(self, path='lpass'):
- self._cli_path = path
-
- @property
- def cli_path(self):
- return self._cli_path
-
- @property
- def logged_in(self):
- out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
- return err.startswith("Are you sure you would like to log out?")
-
- def _run(self, args, stdin=None, expected_rc=0):
- p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
- out, err = p.communicate(to_bytes(stdin))
- rc = p.wait()
- if rc != expected_rc:
- raise LPassException(err)
- return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
-
- def _build_args(self, command, args=None):
- if args is None:
- args = []
- args = [command] + args
- args += ["--color=never"]
- return args
-
- def get_field(self, key, field):
- if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
- out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
- else:
- out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
- return out.strip()
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
- lp = LPass()
-
- if not lp.logged_in:
- raise AnsibleError("Not logged into lastpass: please run 'lpass login' first")
-
- field = kwargs.get('field', 'password')
- values = []
- for term in terms:
- values.append(lp.get_field(term, field))
- return values
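-
-# For reference, mirroring _build_args()/get_field() above: with the default
-# field this plugin effectively runs
-# lpass show --password 'entry-name' --color=never
-# and for a custom field
-# lpass show --field=custom_field 'entry-name' --color=never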
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword.py b/ansible_collections/community/general/plugins/lookup/onepassword.py
deleted file mode 100644
index 9f97a90e..00000000
--- a/ansible_collections/community/general/plugins/lookup/onepassword.py
+++ /dev/null
@@ -1,284 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Scott Buchanan
-# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point)
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: onepassword
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
- short_description: fetch field values from 1Password
- description:
- - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
- options:
- _terms:
- description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
- required: True
- field:
- description: field to return from each matching item (case-insensitive).
- default: 'password'
- master_password:
- description: The password used to unlock the specified vault.
- aliases: ['vault_password']
- section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
- domain:
- description: Domain of 1Password. Default is U(1password.com).
- version_added: 3.2.0
- default: '1password.com'
- type: str
- subdomain:
- description: The 1Password subdomain to authenticate against.
- username:
- description: The username used to sign in.
- secret_key:
- description: The secret key used when performing an initial sign in.
- vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
- notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already
- performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
- You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
- - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
- - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
- needed at any given time. Also, store these credentials in an Ansible Vault using a key that is at least as strong
- as the 1Password master password.
- - This lookup stores potentially sensitive data from 1Password as Ansible facts.
- Facts are subject to caching if enabled, which means this data could be stored in clear text
- on disk or in a database.
- - Tested with C(op) version 0.5.3
-'''
-
-EXAMPLES = """
-# These examples only work when already signed in to 1Password
-- name: Retrieve password for KITT when already signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword', 'KITT')
-
-- name: Retrieve password for Wintermute when already signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute')
-
-- name: Retrieve username for HAL when already signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery')
-
-- name: Retrieve password for HAL when not signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password)
-
-- name: Retrieve password for HAL when never signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword',
- 'HAL 9000',
- subdomain='Discovery',
- master_password=vault_master_password,
- username='tweety@acme.com',
- secret_key=vault_secret_key)
-"""
-
-RETURN = """
- _raw:
- description: field data requested
- type: list
- elements: str
-"""
-
-import errno
-import json
-import os
-
-from subprocess import Popen, PIPE
-
-from ansible.plugins.lookup import LookupBase
-from ansible.errors import AnsibleLookupError
-from ansible.module_utils.common.text.converters import to_bytes, to_text
-
-
-class OnePass(object):
-
- def __init__(self, path='op'):
- self.cli_path = path
- self.config_file_path = os.path.expanduser('~/.op/config')
- self.logged_in = False
- self.token = None
- self.subdomain = None
- self.domain = None
- self.username = None
- self.secret_key = None
- self.master_password = None
-
- def get_token(self):
- # If the config file exists, assume an initial signin has taken place and try basic sign in
- if os.path.isfile(self.config_file_path):
-
- if not self.master_password:
- raise AnsibleLookupError('Unable to sign in to 1Password. master_password is required.')
-
- try:
- args = ['signin', '--output=raw']
-
- if self.subdomain:
- args = ['signin', self.subdomain, '--output=raw']
-
- rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
- self.token = out.strip()
-
- except AnsibleLookupError:
- self.full_login()
-
- else:
- # Attempt a full sign in since there appears to be no existing sign in
- self.full_login()
-
- def assert_logged_in(self):
- try:
- rc, out, err = self._run(['get', 'account'], ignore_errors=True)
- if rc == 0:
- self.logged_in = True
- if not self.logged_in:
- self.get_token()
- except OSError as e:
- if e.errno == errno.ENOENT:
- raise AnsibleLookupError("1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
- raise e
-
- def get_raw(self, item_id, vault=None):
- args = ["get", "item", item_id]
- if vault is not None:
- args += ['--vault={0}'.format(vault)]
- if not self.logged_in:
- args += [to_bytes('--session=') + self.token]
- rc, output, dummy = self._run(args)
- return output
-
- def get_field(self, item_id, field, section=None, vault=None):
- output = self.get_raw(item_id, vault)
- return self._parse_field(output, field, section) if output != '' else ''
-
- def full_login(self):
- if None in [self.subdomain, self.username, self.secret_key, self.master_password]:
- raise AnsibleLookupError('Unable to perform initial sign in to 1Password. '
- 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
-
- args = [
- 'signin',
- '{0}.{1}'.format(self.subdomain, self.domain),
- to_bytes(self.username),
- to_bytes(self.secret_key),
- '--output=raw',
- ]
-
- rc, out, err = self._run(args, command_input=to_bytes(self.master_password))
- self.token = out.strip()
-
- def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
- command = [self.cli_path] + args
- p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
- out, err = p.communicate(input=command_input)
- rc = p.wait()
- if not ignore_errors and rc != expected_rc:
- raise AnsibleLookupError(to_text(err))
- return rc, out, err
-
- def _parse_field(self, data_json, field_name, section_title=None):
- """
- Retrieves the desired field from the `op` response payload
-
- When the item is a `password` type, the password is a key within the `details` key:
-
- $ op get item 'test item' | jq
- {
- [...]
- "templateUuid": "005",
- "details": {
- "notesPlain": "",
- "password": "foobar",
- "passwordHistory": [],
- "sections": [
- {
- "name": "linked items",
- "title": "Related Items"
- }
- ]
- },
- [...]
- }
-
- However, when the item is a `login` type, the password is within a fields array:
-
- $ op get item 'test item' | jq
- {
- [...]
- "details": {
- "fields": [
- {
- "designation": "username",
- "name": "username",
- "type": "T",
- "value": "foo"
- },
- {
- "designation": "password",
- "name": "password",
- "type": "P",
- "value": "bar"
- }
- ],
- [...]
- },
- [...]
- """
- data = json.loads(data_json)
- if section_title is None:
- # https://github.com/ansible-collections/community.general/pull/1610:
- # check the details dictionary for `field_name` and return it immediately if it exists
- # when the entry is a "password" instead of a "login" item, the password field is a key
- # in the `details` dictionary:
- if field_name in data['details']:
- return data['details'][field_name]
-
- # when the field is not found above, iterate through the fields list in the object details
- for field_data in data['details'].get('fields', []):
- if field_data.get('name', '').lower() == field_name.lower():
- return field_data.get('value', '')
- for section_data in data['details'].get('sections', []):
- if section_title is not None and section_title.lower() != section_data['title'].lower():
- continue
- for field_data in section_data.get('fields', []):
- if field_data.get('t', '').lower() == field_name.lower():
- return field_data.get('v', '')
- return ''
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
- op = OnePass()
-
- field = kwargs.get('field', 'password')
- section = kwargs.get('section')
- vault = kwargs.get('vault')
- op.subdomain = kwargs.get('subdomain')
- op.domain = kwargs.get('domain', '1password.com')
- op.username = kwargs.get('username')
- op.secret_key = kwargs.get('secret_key')
- op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
-
- op.assert_logged_in()
-
- values = []
- for term in terms:
- values.append(op.get_field(term, field, section, vault))
- return values
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
deleted file mode 100644
index d1958f78..00000000
--- a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Scott Buchanan
-# Copyright: (c) 2016, Andrew Zenk (lastpass.py used as starting point)
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: onepassword_raw
- author:
- - Scott Buchanan (@scottsb)
- - Andrew Zenk (@azenk)
- - Sam Doran (@samdoran)
- requirements:
- - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
- short_description: fetch an entire item from 1Password
- description:
- - C(onepassword_raw) wraps the C(op) command line utility to fetch an entire item from 1Password.
- options:
- _terms:
- description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve.
- required: True
- master_password:
- description: The password used to unlock the specified vault.
- aliases: ['vault_password']
- section:
- description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
- subdomain:
- description: The 1Password subdomain to authenticate against.
- username:
- description: The username used to sign in.
- secret_key:
- description: The secret key used when performing an initial sign in.
- vault:
- description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
- notes:
- - This lookup will use an existing 1Password session if one exists. If not, and you have already
- performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
- You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
- - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
- - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
- needed at any given time. Also, store these credentials in an Ansible Vault using a key that is at least as strong
- as the 1Password master password.
- - This lookup stores potentially sensitive data from 1Password as Ansible facts.
- Facts are subject to caching if enabled, which means this data could be stored in clear text
- on disk or in a database.
- - Tested with C(op) version 0.5.3
-'''
-
-EXAMPLES = """
-- name: Retrieve all data about Wintermute
- ansible.builtin.debug:
- var: lookup('community.general.onepassword_raw', 'Wintermute')
-
-- name: Retrieve all data about Wintermute when not signed in to 1Password
- ansible.builtin.debug:
- var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
-"""
-
-RETURN = """
- _raw:
- description: field data requested
- type: list
- elements: dict
-"""
-
-import json
-
-from ansible_collections.community.general.plugins.lookup.onepassword import OnePass
-from ansible.plugins.lookup import LookupBase
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, **kwargs):
- op = OnePass()
-
- vault = kwargs.get('vault')
- op.subdomain = kwargs.get('subdomain')
- op.username = kwargs.get('username')
- op.secret_key = kwargs.get('secret_key')
- op.master_password = kwargs.get('master_password', kwargs.get('vault_password'))
-
- op.assert_logged_in()
-
- values = []
- for term in terms:
- data = json.loads(op.get_raw(term, vault))
- values.append(data)
- return values
diff --git a/ansible_collections/community/general/plugins/lookup/passwordstore.py b/ansible_collections/community/general/plugins/lookup/passwordstore.py
deleted file mode 100644
index a221e496..00000000
--- a/ansible_collections/community/general/plugins/lookup/passwordstore.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2017, Patrick Deelman
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
- name: passwordstore
- author:
- - Patrick Deelman (!UNKNOWN)
- short_description: manage passwords with passwordstore.org's pass utility
- description:
- - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
- It also retrieves YAML-style keys stored across multiple lines in the password file.
- - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to
- C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead.
- options:
- _terms:
- description: query key.
- required: True
- passwordstore:
- description: location of the password store.
- default: '~/.password-store'
- directory:
- description: The directory of the password store.
- env:
- - name: PASSWORD_STORE_DIR
- create:
- description: Create the password if it does not already exist. Takes precedence over C(missing).
- type: bool
- default: false
- overwrite:
- description: Overwrite the password if it does already exist.
- type: bool
- default: 'no'
- umask:
- description:
- - Sets the umask for the created .gpg files. The first octal digit must not be greater than 3, otherwise the password file is not user readable.
- - Note pass' default value is C('077').
- env:
- - name: PASSWORD_STORE_UMASK
- version_added: 1.3.0
- returnall:
- description: Return all the content of the password, not only the first line.
- type: bool
- default: 'no'
- subkey:
- description: Return a specific subkey of the password. When set to C(password), always returns the first line.
- default: password
- userpass:
- description: Specify a password to save, instead of a generated one.
- length:
- description: The length of the generated password.
- type: integer
- default: 16
- backup:
- description: Used with C(overwrite=yes). Backup the previous password in a subkey.
- type: bool
- default: 'no'
- nosymbols:
- description: Generate the password using only alphanumeric characters (no symbols).
- type: bool
- default: 'no'
- missing:
- description:
- - Preference for what to do if the password file is missing.
- - If I(create=true), the value for this option is ignored and assumed to be C(create).
- - If set to C(error), the lookup will error out if the passname does not exist.
- - If set to C(create), the passname will be created with the provided length I(length) if it does not exist.
- - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist.
- When using C(lookup) and not C(query), this will be translated to an empty string.
- version_added: 3.1.0
- type: str
- default: error
- choices:
- - error
- - warn
- - empty
- - create
- lock:
- description:
- - How to synchronize operations.
- - The default of C(write) only synchronizes write operations.
- - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
- - C(none) does not do any synchronization.
- ini:
- - section: passwordstore_lookup
- key: lock
- type: str
- default: write
- choices:
- - readwrite
- - write
- - none
- version_added: 4.5.0
- locktimeout:
- description:
- - Lock timeout applied when I(lock) is not C(none).
- - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m).
- - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
- ini:
- - section: passwordstore_lookup
- key: locktimeout
- type: str
- default: 15m
- version_added: 4.5.0
-'''
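-
-# A minimal sketch of the gpg-agent tweak recommended in the description above,
-# assuming a default GnuPG home directory:
-#
-# ~/.gnupg/gpg-agent.conf
-# auto-expand-secmem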
-EXAMPLES = """
-ansible.cfg: |
- [passwordstore_lookup]
- lock=readwrite
- locktimeout=45s
-
-playbook.yml: |
- ---
-
- # Debug is used for examples, BAD IDEA to show passwords on screen
- - name: Basic lookup. Fails if example/test does not exist
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
-
- - name: Basic lookup. Warns if example/test does not exist and returns empty string
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test missing=warn')}}"
-
- - name: Create pass with random 16 character password. If password exists just give the password
- ansible.builtin.debug:
- var: mypassword
- vars:
- mypassword: "{{ lookup('community.general.passwordstore', 'example/test create=true')}}"
-
- - name: Create pass with random 16 character password. If password exists just give the password
- ansible.builtin.debug:
- var: mypassword
- vars:
- mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=create')}}"
-
- - name: Prints 'abc' if example/test does not exist, just give the password otherwise
- ansible.builtin.debug:
- var: mypassword
- vars:
- mypassword: "{{ lookup('community.general.passwordstore', 'example/test missing=empty') | default('abc', true) }}"
-
- - name: Different size password
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test create=true length=42')}}"
-
- - name: Create password and overwrite the password if it exists. As a bonus, this module includes the old password inside the pass file
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test create=true overwrite=true')}}"
-
- - name: Create an alphanumeric password
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test create=true nosymbols=true') }}"
-
- - name: Return the value for user in the KV pair user, username
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.passwordstore', 'example/test subkey=user')}}"
-
- - name: Return the entire password file content
- ansible.builtin.set_fact:
- passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test returnall=true')}}"
-"""
-
-RETURN = """
-_raw:
- description:
- - a password
- type: list
- elements: str
-"""
-
-from contextlib import contextmanager
-import os
-import re
-import subprocess
-import time
-import yaml
-
-from ansible.errors import AnsibleError, AnsibleAssertionError
-from ansible.module_utils.common.file import FileLock
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.parsing.convert_bool import boolean
-from ansible.utils.display import Display
-from ansible.utils.encrypt import random_password
-from ansible.plugins.lookup import LookupBase
-from ansible import constants as C
-
-display = Display()
-
-
-# backhacked check_output with input for python 2.7
-# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
-# note: contains special logic for calling 'pass', so not a drop-in replacement for check_output
-def check_output2(*popenargs, **kwargs):
- if 'stdout' in kwargs:
- raise ValueError('stdout argument not allowed, it will be overridden.')
- if 'stderr' in kwargs:
- raise ValueError('stderr argument not allowed, it will be overridden.')
- if 'input' in kwargs:
- if 'stdin' in kwargs:
- raise ValueError('stdin and input arguments may not both be used.')
- b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
- del kwargs['input']
- kwargs['stdin'] = subprocess.PIPE
- else:
- b_inputdata = None
- process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
- try:
- b_out, b_err = process.communicate(b_inputdata)
- except Exception:
- process.kill()
- process.wait()
- raise
- retcode = process.poll()
- if retcode == 0 and (b'encryption failed: Unusable public key' in b_out or
- b'encryption failed: Unusable public key' in b_err):
- retcode = 78 # os.EX_CONFIG
- if retcode != 0:
- cmd = kwargs.get("args")
- if cmd is None:
- cmd = popenargs[0]
- raise subprocess.CalledProcessError(
- retcode,
- cmd,
- to_native(b_out + b_err, errors='surrogate_or_strict')
- )
- return b_out
-
-
-class LookupModule(LookupBase):
- def parse_params(self, term):
- # I went with the "traditional" param followed with space separated KV pairs.
- # Waiting for final implementation of lookup parameter parsing.
- # See: https://github.com/ansible/ansible/issues/12255
- params = term.split()
- if len(params) > 0:
- # the first param is the pass-name
- self.passname = params[0]
- # next parse the optional parameters in keyvalue pairs
- try:
- for param in params[1:]:
- name, value = param.split('=', 1)
- if name not in self.paramvals:
- raise AnsibleAssertionError('%s not in paramvals' % name)
- self.paramvals[name] = value
- except (ValueError, AssertionError) as e:
- raise AnsibleError(e)
- # check and convert values
- try:
- for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
- if not isinstance(self.paramvals[key], bool):
- self.paramvals[key] = boolean(self.paramvals[key])
- except (ValueError, AssertionError) as e:
- raise AnsibleError(e)
- if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
- raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing']))
- if not isinstance(self.paramvals['length'], int):
- if self.paramvals['length'].isdigit():
- self.paramvals['length'] = int(self.paramvals['length'])
- else:
- raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
-
- if self.paramvals['create']:
- self.paramvals['missing'] = 'create'
-
- # Collect pass environment variables from the plugin's parameters.
- self.env = os.environ.copy()
- self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2
-
- # Set PASSWORD_STORE_DIR
- if os.path.isdir(self.paramvals['directory']):
- self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
- else:
- raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
-
- # Set PASSWORD_STORE_UMASK if umask is set
- if 'umask' in self.paramvals:
- if len(self.paramvals['umask']) != 3:
- raise AnsibleError('Passwordstore umask must have a length of 3.')
- elif int(self.paramvals['umask'][0]) > 3:
- raise AnsibleError('Passwordstore umask not allowed (password not user readable).')
- else:
- self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask']
-
- def check_pass(self):
- try:
- self.passoutput = to_text(
- check_output2(["pass", "show", self.passname], env=self.env),
- errors='surrogate_or_strict'
- ).splitlines()
- self.password = self.passoutput[0]
- self.passdict = {}
- try:
- values = yaml.safe_load('\n'.join(self.passoutput[1:]))
- for key, item in values.items():
- self.passdict[key] = item
- except (yaml.YAMLError, AttributeError):
- for line in self.passoutput[1:]:
- if ':' in line:
- name, value = line.split(':', 1)
- self.passdict[name.strip()] = value.strip()
- if os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg")):
- # Only accept the password as found if there is a .gpg file for it (might be a tree node otherwise)
- return True
- except (subprocess.CalledProcessError) as e:
- # 'not in the password store' is the expected error if a password wasn't found
- if 'not in the password store' not in e.output:
- raise AnsibleError(e)
-
- if self.paramvals['missing'] == 'error':
- raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname))
- elif self.paramvals['missing'] == 'warn':
- display.warning('passwordstore: passname {0} not found'.format(self.passname))
-
- return False
-
- def get_newpass(self):
- if self.paramvals['nosymbols']:
- chars = C.DEFAULT_PASSWORD_CHARS[:62]
- else:
- chars = C.DEFAULT_PASSWORD_CHARS
-
- if self.paramvals['userpass']:
- newpass = self.paramvals['userpass']
- else:
- newpass = random_password(length=self.paramvals['length'], chars=chars)
- return newpass
-
- def update_password(self):
- # generate new password, insert old lines from current result and return new password
- newpass = self.get_newpass()
- datetime = time.strftime("%d/%m/%Y %H:%M:%S")
- msg = newpass + '\n'
- if self.passoutput[1:]:
- msg += '\n'.join(self.passoutput[1:]) + '\n'
- if self.paramvals['backup']:
- msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
- try:
- check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
- except (subprocess.CalledProcessError) as e:
- raise AnsibleError(e)
- return newpass
-
- def generate_password(self):
- # generate new file and insert lookup_pass: Generated by Ansible on {date}
- # use pwgen to generate the password and insert values with pass -m
- newpass = self.get_newpass()
- datetime = time.strftime("%d/%m/%Y %H:%M:%S")
- msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
- try:
- check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
- except (subprocess.CalledProcessError) as e:
- raise AnsibleError(e)
- return newpass
-
- def get_passresult(self):
- if self.paramvals['returnall']:
- return os.linesep.join(self.passoutput)
- if self.paramvals['subkey'] == 'password':
- return self.password
- else:
- if self.paramvals['subkey'] in self.passdict:
- return self.passdict[self.paramvals['subkey']]
- else:
- return None
-
- @contextmanager
- def opt_lock(self, type):
- if self.get_option('lock') == type:
- tmpdir = os.environ.get('TMPDIR', '/tmp')
- lockfile = os.path.join(tmpdir, '.passwordstore.lock')
- with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout):
- self.locked = type
- yield
- self.locked = None
- else:
- yield
-
- def setup(self, variables):
- self.locked = None
- timeout = self.get_option('locktimeout')
- if not re.match('^[0-9]+[smh]$', timeout):
- raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout))
- unit_to_seconds = {"s": 1, "m": 60, "h": 3600}
- self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]]
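- # e.g. the default locktimeout of '15m' becomes 15 * 60 == 900 seconds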
- self.paramvals = {
- 'subkey': 'password',
- 'directory': variables.get('passwordstore', os.environ.get(
- 'PASSWORD_STORE_DIR',
- os.path.expanduser('~/.password-store'))),
- 'create': False,
- 'returnall': False,
- 'overwrite': False,
- 'nosymbols': False,
- 'userpass': '',
- 'length': 16,
- 'backup': False,
- 'missing': 'error',
- }
-
- def run(self, terms, variables, **kwargs):
- self.setup(variables)
- result = []
-
- for term in terms:
- self.parse_params(term) # parse the input into paramvals
- with self.opt_lock('readwrite'):
- if self.check_pass(): # password exists
- if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
- with self.opt_lock('write'):
- result.append(self.update_password())
- else:
- result.append(self.get_passresult())
- else: # password does not exist
- if self.paramvals['missing'] == 'create':
- with self.opt_lock('write'):
- if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock
- result.append(self.get_passresult())
- else:
- result.append(self.generate_password())
- else:
- result.append(None)
-
- return result
diff --git a/ansible_collections/community/general/plugins/lookup/redis.py b/ansible_collections/community/general/plugins/lookup/redis.py
deleted file mode 100644
index 8de7e04c..00000000
--- a/ansible_collections/community/general/plugins/lookup/redis.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2012, Jan-Piet Mens
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
- name: redis
- author:
- - Jan-Piet Mens (@jpmens)
- - Ansible Core Team
- short_description: fetch data from Redis
- description:
- - This lookup returns a list of results from a Redis DB corresponding to a list of keys given to it.
- requirements:
- - redis (python library https://github.com/andymccurdy/redis-py/)
- options:
- _terms:
- description: list of keys to query
- host:
- description: location of Redis host
- default: '127.0.0.1'
- env:
- - name: ANSIBLE_REDIS_HOST
- ini:
- - section: lookup_redis
- key: host
- port:
- description: port on which Redis is listening
- default: 6379
- type: int
- env:
- - name: ANSIBLE_REDIS_PORT
- ini:
- - section: lookup_redis
- key: port
- socket:
- description: path to the socket on which to query Redis; when set, this option overrides the host and port options.
- type: path
- env:
- - name: ANSIBLE_REDIS_SOCKET
- ini:
- - section: lookup_redis
- key: socket
-'''
-
-EXAMPLES = """
-- name: query redis for somekey (default or configured settings used)
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.redis', 'somekey') }}"
-
-- name: query redis for list of keys and non-default host and port
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}"
- loop: '{{list_of_redis_keys}}'
-
-- name: use list directly
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}"
-
-- name: use list directly with a socket
- ansible.builtin.debug:
- msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
-
-"""
-
-RETURN = """
-_raw:
- description: value(s) stored in Redis
- type: list
- elements: str
-"""
-
-import os
-
-HAVE_REDIS = False
-try:
- import redis
- HAVE_REDIS = True
-except ImportError:
- pass
-
-from ansible.module_utils.common.text.converters import to_text
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables, **kwargs):
-
- if not HAVE_REDIS:
- raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
-
- # get options
- self.set_options(direct=kwargs)
-
- # setup connection
- host = self.get_option('host')
- port = self.get_option('port')
- socket = self.get_option('socket')
- if socket is None:
- conn = redis.Redis(host=host, port=port)
- else:
- conn = redis.Redis(unix_socket_path=socket)
-
- ret = []
- for term in terms:
- try:
- res = conn.get(term)
- if res is None:
- res = ""
- ret.append(to_text(res))
- except Exception as e:
- # connection failed or key not found
- raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
- return ret
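-
-# For illustration (assuming a local Redis and the stock redis-cli client):
-# $ redis-cli SET somekey somevalue
-# after which lookup('community.general.redis', 'somekey') returns 'somevalue'.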
diff --git a/ansible_collections/community/general/plugins/lookup/tss.py b/ansible_collections/community/general/plugins/lookup/tss.py
deleted file mode 100644
index 880e6e38..00000000
--- a/ansible_collections/community/general/plugins/lookup/tss.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2020, Adam Migus
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = r"""
-name: tss
-author: Adam Migus (@amigus)
-short_description: Get secrets from Thycotic Secret Server
-version_added: 1.0.0
-description:
- - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
- Server using token authentication with I(username) and I(password) on
- the REST API at I(base_url).
- - When using self-signed certificates the environment variable
- C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
- (in C(.pem) format).
- - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
-requirements:
- - python-tss-sdk - https://pypi.org/project/python-tss-sdk/
-options:
- _terms:
- description: The integer ID of the secret.
- required: true
- type: int
- base_url:
- description: The base URL of the server, e.g. C(https://localhost/SecretServer).
- env:
- - name: TSS_BASE_URL
- ini:
- - section: tss_lookup
- key: base_url
- required: true
- username:
- description: The username with which to request the OAuth2 Access Grant.
- env:
- - name: TSS_USERNAME
- ini:
- - section: tss_lookup
- key: username
- password:
- description:
- - The password associated with the supplied username.
- - Required when I(token) is not provided.
- env:
- - name: TSS_PASSWORD
- ini:
- - section: tss_lookup
- key: password
- domain:
- default: ""
- description:
- - The domain with which to request the OAuth2 Access Grant.
- - Optional when I(token) is not provided.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- env:
- - name: TSS_DOMAIN
- ini:
- - section: tss_lookup
- key: domain
- required: false
- version_added: 3.6.0
- token:
- description:
- - Existing token for Thycotic authorizer.
- - If provided, I(username) and I(password) are not needed.
- - Requires C(python-tss-sdk) version 1.0.0 or greater.
- env:
- - name: TSS_TOKEN
- ini:
- - section: tss_lookup
- key: token
- version_added: 3.7.0
- api_path_uri:
- default: /api/v1
- description: The path to append to the base URL to form a valid REST
- API request.
- env:
- - name: TSS_API_PATH_URI
- required: false
- token_path_uri:
- default: /oauth2/token
- description: The path to append to the base URL to form a valid OAuth2
- Access Grant request.
- env:
- - name: TSS_TOKEN_PATH_URI
- required: false
-"""
-
-RETURN = r"""
-_list:
- description:
- - The JSON responses to C(GET /secrets/{id}).
- - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
- type: list
- elements: dict
-"""
-
-EXAMPLES = r"""
-- hosts: localhost
- vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password'
- )
- }}
- tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
-
-- hosts: localhost
- vars:
- secret: >-
- {{
- lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- username='user.name',
- password='password',
- domain='domain'
- )
- }}
- tasks:
- - ansible.builtin.debug:
- msg: >
- the password is {{
- (secret['items']
- | items2dict(key_name='slug',
- value_name='itemValue'))['password']
- }}
-
-- hosts: localhost
- vars:
- secret_password: >-
- {{
- ((lookup(
- 'community.general.tss',
- 102,
- base_url='https://secretserver.domain.com/SecretServer/',
- token='thycotic_access_token',
- ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
- }}
- tasks:
- - ansible.builtin.debug:
- msg: the password is {{ secret_password }}
-"""
-
-import abc
-
-from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.module_utils import six
-from ansible.plugins.lookup import LookupBase
-from ansible.utils.display import Display
-
-try:
- from thycotic.secrets.server import SecretServer, SecretServerError
-
- HAS_TSS_SDK = True
-except ImportError:
- SecretServer = None
- SecretServerError = None
- HAS_TSS_SDK = False
-
-try:
- from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
-
- HAS_TSS_AUTHORIZER = True
-except ImportError:
- PasswordGrantAuthorizer = None
- DomainPasswordGrantAuthorizer = None
- AccessTokenAuthorizer = None
- HAS_TSS_AUTHORIZER = False
-
-
-display = Display()
-
-
-@six.add_metaclass(abc.ABCMeta)
-class TSSClient(object):
- def __init__(self):
- self._client = None
-
- @staticmethod
- def from_params(**server_parameters):
- if HAS_TSS_AUTHORIZER:
- return TSSClientV1(**server_parameters)
- else:
- return TSSClientV0(**server_parameters)
-
- def get_secret(self, term):
- display.debug("tss_lookup term: %s" % term)
-
- secret_id = self._term_to_secret_id(term)
- display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
-
- return self._client.get_secret_json(secret_id)
-
- @staticmethod
- def _term_to_secret_id(term):
- try:
- return int(term)
- except ValueError:
- raise AnsibleOptionsError("Secret ID must be an integer")
-
-
-class TSSClientV0(TSSClient):
- def __init__(self, **server_parameters):
- super(TSSClientV0, self).__init__()
-
- if server_parameters.get("domain"):
- raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater")
-
- self._client = SecretServer(
- server_parameters["base_url"],
- server_parameters["username"],
- server_parameters["password"],
- server_parameters["api_path_uri"],
- server_parameters["token_path_uri"],
- )
-
-
-class TSSClientV1(TSSClient):
- def __init__(self, **server_parameters):
- super(TSSClientV1, self).__init__()
-
- authorizer = self._get_authorizer(**server_parameters)
- self._client = SecretServer(
- server_parameters["base_url"], authorizer, server_parameters["api_path_uri"]
- )
-
- @staticmethod
- def _get_authorizer(**server_parameters):
- if server_parameters.get("token"):
- return AccessTokenAuthorizer(
- server_parameters["token"],
- )
-
- if server_parameters.get("domain"):
- return DomainPasswordGrantAuthorizer(
- server_parameters["base_url"],
- server_parameters["username"],
- server_parameters["domain"],
- server_parameters["password"],
- server_parameters["token_path_uri"],
- )
-
- return PasswordGrantAuthorizer(
- server_parameters["base_url"],
- server_parameters["username"],
- server_parameters["password"],
- server_parameters["token_path_uri"],
- )
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
- if not HAS_TSS_SDK:
- raise AnsibleError("python-tss-sdk must be installed to use this plugin")
-
- self.set_options(var_options=variables, direct=kwargs)
-
- tss = TSSClient.from_params(
- base_url=self.get_option("base_url"),
- username=self.get_option("username"),
- password=self.get_option("password"),
- domain=self.get_option("domain"),
- token=self.get_option("token"),
- api_path_uri=self.get_option("api_path_uri"),
- token_path_uri=self.get_option("token_path_uri"),
- )
-
- try:
- return [tss.get_secret(term) for term in terms]
- except SecretServerError as error:
- raise AnsibleError("Secret Server lookup failure: %s" % error.message)
diff --git a/ansible_collections/community/general/plugins/module_utils/_mount.py b/ansible_collections/community/general/plugins/module_utils/_mount.py
deleted file mode 100644
index 391d4681..00000000
--- a/ansible_collections/community/general/plugins/module_utils/_mount.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is based on
-# Lib/posixpath.py of cpython
-# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
-#
-# 1. This LICENSE AGREEMENT is between the Python Software Foundation
-# ("PSF"), and the Individual or Organization ("Licensee") accessing and
-# otherwise using this software ("Python") in source or binary form and
-# its associated documentation.
-#
-# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
-# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
-# analyze, test, perform and/or display publicly, prepare derivative works,
-# distribute, and otherwise use Python alone or in any derivative version,
-# provided, however, that PSF's License Agreement and PSF's notice of copyright,
-# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
-# 2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved"
-# are retained in Python alone or in any derivative version prepared by Licensee.
-#
-# 3. In the event Licensee prepares a derivative work that is based on
-# or incorporates Python or any part thereof, and wants to make
-# the derivative work available to others as provided herein, then
-# Licensee hereby agrees to include in any such work a brief summary of
-# the changes made to Python.
-#
-# 4. PSF is making Python available to Licensee on an "AS IS"
-# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-# INFRINGE ANY THIRD PARTY RIGHTS.
-#
-# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-#
-# 6. This License Agreement will automatically terminate upon a material
-# breach of its terms and conditions.
-#
-# 7. Nothing in this License Agreement shall be deemed to create any
-# relationship of agency, partnership, or joint venture between PSF and
-# Licensee. This License Agreement does not grant permission to use PSF
-# trademarks or trade name in a trademark sense to endorse or promote
-# products or services of Licensee, or any third party.
-#
-# 8. By copying, installing or otherwise using Python, Licensee
-# agrees to be bound by the terms and conditions of this License
-# Agreement.
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import os
-
-
-def ismount(path):
- """Test whether a path is a mount point
- This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround
- until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python
- that may not have the upstream fix.
- https://github.com/ansible/ansible-modules-core/issues/2186
- http://bugs.python.org/issue2466
- """
- try:
- s1 = os.lstat(path)
- except (OSError, ValueError):
- # It doesn't exist -- so not a mount point. :-)
- return False
- else:
- # A symlink can never be a mount point
- if os.path.stat.S_ISLNK(s1.st_mode):
- return False
-
- if isinstance(path, bytes):
- parent = os.path.join(path, b'..')
- else:
- parent = os.path.join(path, '..')
- parent = os.path.realpath(parent)
- try:
- s2 = os.lstat(parent)
- except (OSError, ValueError):
- return False
-
- dev1 = s1.st_dev
- dev2 = s2.st_dev
- if dev1 != dev2:
- return True # path/.. on a different device as path
- ino1 = s1.st_ino
- ino2 = s2.st_ino
- if ino1 == ino2:
- return True # path/.. is the same i-node as path
- return False
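-
-# Usage sketch: ismount('/') is always True (its parent resolves to the same
-# i-node), while a regular directory such as '/etc' would normally return False.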
diff --git a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
deleted file mode 100644
index bcb02e84..00000000
--- a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Dimension Data
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# Authors:
-# - Aimon Bustardo
-# - Mark Maglana
-# - Adam Friedman
-#
-# Common functionality to be used by various module components
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import re
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six.moves import configparser
-from os.path import expanduser
-from uuid import UUID
-
-LIBCLOUD_IMP_ERR = None
-try:
- from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus
- from libcloud.compute.base import Node, NodeLocation
- from libcloud.compute.providers import get_driver
- from libcloud.compute.types import Provider
-
- import libcloud.security
-
- HAS_LIBCLOUD = True
-except ImportError:
- LIBCLOUD_IMP_ERR = traceback.format_exc()
- HAS_LIBCLOUD = False
-
-# MCP 2.x version pattern for location (datacenter) names.
-#
-# Note that this is not a totally reliable way of determining MCP version.
-# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
-# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
-# by specifying it in the module parameters.
-MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
-
-
-class DimensionDataModule(object):
- """
- The base class containing common functionality used by Dimension Data modules for Ansible.
- """
-
- def __init__(self, module):
- """
- Create a new DimensionDataModule.
-
- Will fail if Apache libcloud is not present.
-
- :param module: The underlying Ansible module.
- :type module: AnsibleModule
- """
-
- self.module = module
-
- if not HAS_LIBCLOUD:
- self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
-
- # Credentials are common to all Dimension Data modules.
- credentials = self.get_credentials()
- self.user_id = credentials['user_id']
- self.key = credentials['key']
-
- # Region and location are common to all Dimension Data modules.
- region = self.module.params['region']
- self.region = 'dd-{0}'.format(region)
- self.location = self.module.params['location']
-
- libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
-
- self.driver = get_driver(Provider.DIMENSIONDATA)(
- self.user_id,
- self.key,
- region=self.region
- )
-
- # Determine the MCP API version (this depends on the target datacenter).
- self.mcp_version = self.get_mcp_version(self.location)
-
- # Optional "wait-for-completion" arguments
- if 'wait' in self.module.params:
- self.wait = self.module.params['wait']
- self.wait_time = self.module.params['wait_time']
- self.wait_poll_interval = self.module.params['wait_poll_interval']
- else:
- self.wait = False
- self.wait_time = 0
- self.wait_poll_interval = 0
-
- def get_credentials(self):
- """
- Get user_id and key from module configuration, environment, or dotfile.
- Order of priority is module, environment, dotfile.
-
- To set in environment:
-
- export MCP_USER='myusername'
- export MCP_PASSWORD='mypassword'
-
- To set in dot file place a file at ~/.dimensiondata with
- the following contents:
-
- [dimensiondatacloud]
- MCP_USER: myusername
- MCP_PASSWORD: mypassword
- """
-
- if not HAS_LIBCLOUD:
- self.module.fail_json(msg='libcloud is required for this module.')
-
- user_id = None
- key = None
-
- # First, try the module configuration
- if 'mcp_user' in self.module.params:
- if 'mcp_password' not in self.module.params:
- self.module.fail_json(
- msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
- )
-
- user_id = self.module.params['mcp_user']
- key = self.module.params['mcp_password']
-
- # Fall back to environment
- if not user_id or not key:
- user_id = os.environ.get('MCP_USER', None)
- key = os.environ.get('MCP_PASSWORD', None)
-
- # Finally, try dotfile (~/.dimensiondata)
- if not user_id or not key:
- home = expanduser('~')
- config = configparser.RawConfigParser()
- config.read("%s/.dimensiondata" % home)
-
- try:
- user_id = config.get("dimensiondatacloud", "MCP_USER")
- key = config.get("dimensiondatacloud", "MCP_PASSWORD")
- except (configparser.NoSectionError, configparser.NoOptionError):
- pass
-
- # One or more credentials not found. Function can't recover from this
- # so it has to raise an error instead of fail silently.
- if not user_id:
- raise MissingCredentialsError("Dimension Data user id not found")
- elif not key:
- raise MissingCredentialsError("Dimension Data key not found")
-
- # Both found, return data
- return dict(user_id=user_id, key=key)
-
- def get_mcp_version(self, location):
- """
- Get the MCP version for the specified location.
- """
-
- location = self.driver.ex_get_location_by_id(location)
- if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
- return '2.0'
-
- return '1.0'
-
- def get_network_domain(self, locator, location):
- """
- Retrieve a network domain by its name or Id.
- """
-
- if is_uuid(locator):
- network_domain = self.driver.ex_get_network_domain(locator)
- else:
- matching_network_domains = [
- network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
- if network_domain.name == locator
- ]
-
- if matching_network_domains:
- network_domain = matching_network_domains[0]
- else:
- network_domain = None
-
- if network_domain:
- return network_domain
-
- raise UnknownNetworkError("Network '%s' could not be found" % locator)
-
- def get_vlan(self, locator, location, network_domain):
- """
- Get a VLAN object by its name or id
- """
- if is_uuid(locator):
- vlan = self.driver.ex_get_vlan(locator)
- else:
- matching_vlans = [
- vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
- if vlan.name == locator
- ]
-
- if matching_vlans:
- vlan = matching_vlans[0]
- else:
- vlan = None
-
- if vlan:
- return vlan
-
- raise UnknownVLANError("VLAN '%s' could not be found" % locator)
-
- @staticmethod
- def argument_spec(**additional_argument_spec):
- """
- Build an argument specification for a Dimension Data module.
- :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
- :return: A dict containing the argument specification.
- """
-
- spec = dict(
- region=dict(type='str', default='na'),
- mcp_user=dict(type='str', required=False),
- mcp_password=dict(type='str', required=False, no_log=True),
- location=dict(type='str', required=True),
- validate_certs=dict(type='bool', required=False, default=True)
- )
-
- if additional_argument_spec:
- spec.update(additional_argument_spec)
-
- return spec
-
- @staticmethod
- def argument_spec_with_wait(**additional_argument_spec):
- """
- Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
- :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
- :return: A dict containing the argument specification.
- """
-
- spec = DimensionDataModule.argument_spec(
- wait=dict(type='bool', required=False, default=False),
- wait_time=dict(type='int', required=False, default=600),
- wait_poll_interval=dict(type='int', required=False, default=2)
- )
-
- if additional_argument_spec:
- spec.update(additional_argument_spec)
-
- return spec
-
- @staticmethod
- def required_together(*additional_required_together):
- """
- Get the basic argument specification for Dimension Data modules indicating which arguments must be specified together.
- :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
- :return: An array containing the argument specifications.
- """
-
- required_together = [
- ['mcp_user', 'mcp_password']
- ]
-
- if additional_required_together:
- required_together.extend(additional_required_together)
-
- return required_together
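-
- # Hypothetical wiring sketch (module name and options assumed, following
- # standard AnsibleModule usage):
- # module = AnsibleModule(
- # argument_spec=DimensionDataModule.argument_spec_with_wait(),
- # required_together=DimensionDataModule.required_together(),
- # )
- # dd_module = DimensionDataModule(module)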
-
-
-class LibcloudNotFound(Exception):
- """
- Exception raised when Apache libcloud cannot be found.
- """
-
- pass
-
-
-class MissingCredentialsError(Exception):
- """
- Exception raised when credentials for Dimension Data CloudControl cannot be found.
- """
-
- pass
-
-
-class UnknownNetworkError(Exception):
- """
- Exception raised when a network or network domain cannot be found.
- """
-
- pass
-
-
-class UnknownVLANError(Exception):
- """
- Exception raised when a VLAN cannot be found.
- """
-
- pass
-
-
-def get_dd_regions():
- """
- Get the list of available regions whose vendor is Dimension Data.
- """
-
- # Get endpoints
- all_regions = API_ENDPOINTS.keys()
-
- # Only Dimension Data endpoints (no prefix)
- regions = [region[3:] for region in all_regions if region.startswith('dd-')]
-
- return regions
-
-
-def is_uuid(u, version=4):
- """
- Test if valid v4 UUID
- """
- try:
- uuid_obj = UUID(u, version=version)
-
- return str(uuid_obj) == u
- except ValueError:
- return False
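-
-# Illustration: is_uuid('12345678-1234-4123-8123-123456789012') returns True
-# (version nibble 4, RFC 4122 variant), while is_uuid('not-a-uuid') returns False.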
diff --git a/ansible_collections/community/general/plugins/module_utils/gitlab.py b/ansible_collections/community/general/plugins/module_utils/gitlab.py
deleted file mode 100644
index 21af10b5..00000000
--- a/ansible_collections/community/general/plugins/module_utils/gitlab.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
-# Copyright: (c) 2018, Marcus Watkins
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-try:
- from urllib import quote_plus # Python 2.X
- from urlparse import urljoin
-except ImportError:
- from urllib.parse import quote_plus, urljoin # Python 3+
-
-import traceback
-
-GITLAB_IMP_ERR = None
-try:
- import gitlab
- import requests
- HAS_GITLAB_PACKAGE = True
-except Exception:
- GITLAB_IMP_ERR = traceback.format_exc()
- HAS_GITLAB_PACKAGE = False
-
-
-def auth_argument_spec(spec=None):
- arg_spec = (dict(
- api_token=dict(type='str', no_log=True),
- api_oauth_token=dict(type='str', no_log=True),
- api_job_token=dict(type='str', no_log=True),
- ))
- if spec:
- arg_spec.update(spec)
- return arg_spec
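-
-# Hypothetical usage sketch: a module merges its own options into the shared
-# auth spec, e.g.
-# argument_spec = auth_argument_spec(dict(api_url=dict(type='str', required=True)))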
-
-
-def find_project(gitlab_instance, identifier):
- try:
- project = gitlab_instance.projects.get(identifier)
- except Exception as e:
- current_user = gitlab_instance.user
- try:
- project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
- except Exception as e:
- return None
-
- return project
-
-
-def find_group(gitlab_instance, identifier):
- try:
- project = gitlab_instance.groups.get(identifier)
- except Exception as e:
- return None
-
- return project
-
-
-def gitlab_authentication(module):
- gitlab_url = module.params['api_url']
- validate_certs = module.params['validate_certs']
- gitlab_user = module.params['api_username']
- gitlab_password = module.params['api_password']
- gitlab_token = module.params['api_token']
- gitlab_oauth_token = module.params['api_oauth_token']
- gitlab_job_token = module.params['api_job_token']
-
- if not HAS_GITLAB_PACKAGE:
- module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
-
- try:
-        # The python-gitlab library removed support for username/password authentication in 1.13.0
-        # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
-        # This condition allows us to keep supporting older versions of the python-gitlab library
- if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
- private_token=gitlab_token, api_version=4)
- else:
- # We can create an oauth_token using a username and password
- # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
- if gitlab_user:
- data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
- resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
- resp_data = resp.json()
- gitlab_oauth_token = resp_data["access_token"]
-
- gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
- oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
-
- gitlab_instance.auth()
- except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
- module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
- except (gitlab.exceptions.GitlabHttpError) as e:
- module.fail_json(msg="Failed to connect to GitLab server: %s. \
- GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))
-
- return gitlab_instance
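-
-
-# Minimal usage sketch (hypothetical module code; assumes the calling module
-# declared the api_url/api_token/... parameters consumed above):
-#
-#   gitlab_instance = gitlab_authentication(module)
-#   project = find_project(gitlab_instance, 'mygroup/myproject')
-#   if project is None:
-#       module.fail_json(msg='Project not found')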
diff --git a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
deleted file mode 100644
index a856901b..00000000
--- a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
+++ /dev/null
@@ -1,1740 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, Eike Frost
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import json
-import traceback
-
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
-from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils.common.text.converters import to_native, to_text
-
-URL_REALM_INFO = "{url}/realms/{realm}"
-URL_REALMS = "{url}/admin/realms"
-URL_REALM = "{url}/admin/realms/{realm}"
-
-URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
-URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
-URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
-
-URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles"
-URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}"
-URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites"
-
-URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
-URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}"
-URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites"
-
-URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
-URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
-URL_GROUPS = "{url}/admin/realms/{realm}/groups"
-URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
-
-URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes"
-URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
-URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
-URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"
-
-URL_CLIENT_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
-URL_CLIENT_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
-URL_CLIENT_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
-
-URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
-URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
-URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy"
-URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions"
-URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution"
-URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow"
-URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config"
-URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority"
-URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority"
-URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}"
-
-URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances"
-URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}"
-URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers"
-URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}"
-
-URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
-URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"
-
-
-def keycloak_argument_spec():
- """
- Returns argument_spec of options common to keycloak_*-modules
-
- :return: argument_spec dict
- """
- return dict(
- auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
- auth_client_id=dict(type='str', default='admin-cli'),
- auth_realm=dict(type='str'),
- auth_client_secret=dict(type='str', default=None, no_log=True),
- auth_username=dict(type='str', aliases=['username']),
- auth_password=dict(type='str', aliases=['password'], no_log=True),
- validate_certs=dict(type='bool', default=True),
- connection_timeout=dict(type='int', default=10),
- token=dict(type='str', no_log=True),
- )
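-
-# Illustrative only (the option names added below are assumptions, not from
-# this file): a keycloak_* module would extend this shared spec, e.g.
-#
-#   argument_spec = keycloak_argument_spec()
-#   argument_spec.update(realm=dict(type='str', default='master'))
-#   module = AnsibleModule(argument_spec=argument_spec,
-#                          supports_check_mode=True)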
-
-
-def camel(words):
- return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
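-
-# For example: camel('client_authenticator_type') == 'clientAuthenticatorType';
-# the `or '_'` maps empty segments (from double underscores) to '_' instead of dropping them.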
-
-
-class KeycloakError(Exception):
- pass
-
-
-def get_token(module_params):
- """ Obtains connection header with token for the authentication,
- token already given or obtained from credentials
- :param module_params: parameters of the module
- :return: connection header
- """
- token = module_params.get('token')
- base_url = module_params.get('auth_keycloak_url')
-
-    if not base_url.lower().startswith(('http://', 'https://')):
-        raise KeycloakError("auth_keycloak_url '%s' should start with 'http://' or 'https://'." % base_url)
-
- if token is None:
- base_url = module_params.get('auth_keycloak_url')
- validate_certs = module_params.get('validate_certs')
- auth_realm = module_params.get('auth_realm')
- client_id = module_params.get('auth_client_id')
- auth_username = module_params.get('auth_username')
- auth_password = module_params.get('auth_password')
- client_secret = module_params.get('auth_client_secret')
- connection_timeout = module_params.get('connection_timeout')
- auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
- temp_payload = {
- 'grant_type': 'password',
- 'client_id': client_id,
- 'client_secret': client_secret,
- 'username': auth_username,
- 'password': auth_password,
- }
- # Remove empty items, for instance missing client_secret
- payload = dict(
- (k, v) for k, v in temp_payload.items() if v is not None)
- try:
- r = json.loads(to_native(open_url(auth_url, method='POST',
- validate_certs=validate_certs, timeout=connection_timeout,
- data=urlencode(payload)).read()))
- except ValueError as e:
- raise KeycloakError(
- 'API returned invalid JSON when trying to obtain access token from %s: %s'
- % (auth_url, str(e)))
- except Exception as e:
- raise KeycloakError('Could not obtain access token from %s: %s'
- % (auth_url, str(e)))
-
- try:
- token = r['access_token']
- except KeyError:
- raise KeycloakError(
- 'Could not obtain access token from %s' % auth_url)
- return {
- 'Authorization': 'Bearer ' + token,
- 'Content-Type': 'application/json'
- }
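-
-# Usage sketch (hypothetical, mirroring how modules consume this helper):
-#
-#   try:
-#       connection_header = get_token(module.params)
-#   except KeycloakError as e:
-#       module.fail_json(msg=str(e))
-#   kc = KeycloakAPI(module, connection_header)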
-
-
-def is_struct_included(struct1, struct2, exclude=None):
- """
-    This function checks whether the first structure is included in the second.
-    It takes every element of struct1 and validates that it is present in struct2.
-    The two structures do not need to be equal for this function to return True.
-    Elements are compared recursively.
- :param struct1:
- type:
- dict for the initial call, can be dict, list, bool, int or str for recursive calls
- description:
- reference structure
- :param struct2:
- type:
- dict for the initial call, can be dict, list, bool, int or str for recursive calls
- description:
- structure to compare with first parameter.
- :param exclude:
- type:
- list
- description:
- Key to exclude from the comparison.
- default: None
- :return:
- type:
- bool
- description:
-            Return True if every element of struct1 is present in struct2, False otherwise.
- """
- if isinstance(struct1, list) and isinstance(struct2, list):
- for item1 in struct1:
- if isinstance(item1, (list, dict)):
- for item2 in struct2:
- if not is_struct_included(item1, item2, exclude):
- return False
- else:
- if item1 not in struct2:
- return False
- return True
- elif isinstance(struct1, dict) and isinstance(struct2, dict):
- try:
- for key in struct1:
- if not (exclude and key in exclude):
- if not is_struct_included(struct1[key], struct2[key], exclude):
- return False
- return True
- except KeyError:
- return False
- elif isinstance(struct1, bool) and isinstance(struct2, bool):
- return struct1 == struct2
- else:
- return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8')
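-
-# Examples of the semantics above (illustrative):
-#   is_struct_included({'a': 1}, {'a': 1, 'b': 2})                  # True  - subset
-#   is_struct_included({'a': 1, 'b': 2}, {'a': 1})                  # False - 'b' missing
-#   is_struct_included({'a': 1, 'x': 9}, {'a': 1}, exclude=['x'])   # True  - 'x' skipped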
-
-
-class KeycloakAPI(object):
- """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
- is obtained through OpenID connect
- """
- def __init__(self, module, connection_header):
- self.module = module
- self.baseurl = self.module.params.get('auth_keycloak_url')
- self.validate_certs = self.module.params.get('validate_certs')
- self.connection_timeout = self.module.params.get('connection_timeout')
- self.restheaders = connection_header
-
- def get_realm_info_by_id(self, realm='master'):
- """ Obtain realm public info by id
-
- :param realm: realm id
-        :return: dict of realm representation, or None if no matching realm exists
- """
- realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
-
- try:
- return json.loads(to_native(open_url(realm_info_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
- except Exception as e:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
-
- def get_realm_by_id(self, realm='master'):
- """ Obtain realm representation by id
-
- :param realm: realm id
-        :return: dict of realm representation, or None if no matching realm exists
- """
- realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
-
- try:
- return json.loads(to_native(open_url(realm_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
- except Exception as e:
- self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
-
- def update_realm(self, realmrep, realm="master"):
- """ Update an existing realm
- :param realmrep: corresponding (partial/full) realm representation with updates
- :param realm: realm to be updated in Keycloak
- :return: HTTPResponse object on success
- """
- realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(realm_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(realmrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
-
- def create_realm(self, realmrep):
- """ Create a realm in keycloak
- :param realmrep: Realm representation of realm to be created.
- :return: HTTPResponse object on success
- """
- realm_url = URL_REALMS.format(url=self.baseurl)
-
- try:
- return open_url(realm_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(realmrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
- exception=traceback.format_exc())
-
- def delete_realm(self, realm="master"):
- """ Delete a realm from Keycloak
-
- :param realm: realm to be deleted
- :return: HTTPResponse object on success
- """
- realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(realm_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)),
- exception=traceback.format_exc())
-
- def get_clients(self, realm='master', filter=None):
- """ Obtains client representations for clients in a realm
-
- :param realm: realm to be queried
- :param filter: if defined, only the client with clientId specified in the filter is returned
- :return: list of dicts of client representations
- """
- clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
- if filter is not None:
- clientlist_url += '?clientId=%s' % filter
-
- try:
- return json.loads(to_native(open_url(clientlist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
- % (realm, str(e)))
-
- def get_client_by_clientid(self, client_id, realm='master'):
- """ Get client representation by clientId
- :param client_id: The clientId to be queried
- :param realm: realm from which to obtain the client representation
- :return: dict with a client representation or None if none matching exist
- """
- r = self.get_clients(realm=realm, filter=client_id)
- if len(r) > 0:
- return r[0]
- else:
- return None
-
- def get_client_by_id(self, id, realm='master'):
- """ Obtain client representation by id
-
- :param id: id (not clientId) of client to be queried
- :param realm: client from this realm
- :return: dict of client representation or None if none matching exist
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return json.loads(to_native(open_url(client_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_id(self, client_id, realm='master'):
- """ Obtain id of client by client_id
-
- :param client_id: client_id of client to be queried
- :param realm: client template from this realm
- :return: id of client (usually a UUID)
- """
- result = self.get_client_by_clientid(client_id, realm)
- if isinstance(result, dict) and 'id' in result:
- return result['id']
- else:
- return None
-
- def update_client(self, id, clientrep, realm="master"):
- """ Update an existing client
- :param id: id (not clientId) of client to be updated in Keycloak
- :param clientrep: corresponding (partial/full) client representation with updates
- :param realm: realm the client is in
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(client_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update client %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def create_client(self, clientrep, realm="master"):
- """ Create a client in keycloak
- :param clientrep: Client representation of client to be created. Must at least contain field clientId.
- :param realm: realm for client to be created.
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(client_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create client %s in realm %s: %s'
- % (clientrep['clientId'], realm, str(e)))
-
- def delete_client(self, id, realm="master"):
- """ Delete a client from Keycloak
-
- :param id: id (not clientId) of client to be deleted
- :param realm: realm of client to be deleted
- :return: HTTPResponse object on success
- """
- client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(client_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_roles_by_id(self, cid, realm="master"):
- """ Fetch the roles of the a client on the Keycloak server.
-
- :param cid: ID of the client from which to obtain the rolemappings.
- :param realm: Realm from which to obtain the rolemappings.
- :return: The rollemappings of specified group and client of the realm (default "master").
- """
- client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return json.loads(to_native(open_url(client_roles_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
- % (cid, realm, str(e)))
-
- def get_client_role_by_name(self, gid, cid, name, realm="master"):
- """ Get the role ID of a client.
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
- :param name: Name of the role.
- :param realm: Realm from which to obtain the rolemappings.
- :return: The ID of the role, None if not found.
- """
- rolemappings = self.get_client_roles_by_id(cid, realm=realm)
- for role in rolemappings:
- if name == role['name']:
- return role['id']
- return None
-
- def get_client_rolemapping_by_id(self, gid, cid, rid, realm='master'):
- """ Obtain client representation by id
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
- :param rid: ID of the role.
-        :param realm: Realm from which to obtain the rolemappings.
-        :return: dict of rolemapping representation, or None if no matching rolemapping exists
- """
- rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
- try:
- rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- for role in rolemappings:
- if rid == role['id']:
- return role
- except Exception as e:
- self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
- return None
-
- def get_client_available_rolemappings(self, gid, cid, realm="master"):
- """ Fetch the available role of a client in a specified goup on the Keycloak server.
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
- :param realm: Realm from which to obtain the rolemappings.
-        :return: The available rolemappings of the specified group and client in the realm (default "master").
- """
- available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
- try:
- return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
-
- def get_client_composite_rolemappings(self, gid, cid, realm="master"):
- """ Fetch the composite role of a client in a specified group on the Keycloak server.
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
- :param realm: Realm from which to obtain the rolemappings.
-        :return: The composite rolemappings of the specified group and client in the realm (default "master").
- """
- available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
- try:
- return json.loads(to_native(open_url(available_rolemappings_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
-
- def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
- """ Fetch the composite role of a client in a specified goup on the Keycloak server.
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
- :param role_rep: Representation of the role to assign.
- :param realm: Realm from which to obtain the rolemappings.
- :return: None.
- """
- available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
- try:
- open_url(available_rolemappings_url, method="POST", headers=self.restheaders, data=json.dumps(role_rep),
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
-
- def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
- """ Delete the rolemapping of a client in a specified group on the Keycloak server.
-
- :param gid: ID of the group from which to obtain the rolemappings.
- :param cid: ID of the client from which to obtain the rolemappings.
-        :param role_rep: Representation of the role to remove.
- :param realm: Realm from which to obtain the rolemappings.
- :return: None.
- """
- available_rolemappings_url = URL_CLIENT_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
- try:
- open_url(available_rolemappings_url, method="DELETE", headers=self.restheaders,
- validate_certs=self.validate_certs, timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
- % (cid, gid, realm, str(e)))
-
- def get_client_templates(self, realm='master'):
- """ Obtains client template representations for client templates in a realm
-
- :param realm: realm to be queried
-        :return: list of dicts of client template representations
- """
- url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
-
- try:
- return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
- % (realm, str(e)))
-
- def get_client_template_by_id(self, id, realm='master'):
- """ Obtain client template representation by id
-
- :param id: id (not name) of client template to be queried
- :param realm: client template from this realm
- :return: dict of client template representation or None if none matching exist
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
-
- try:
- return json.loads(to_native(open_url(url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
- % (id, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
- % (id, realm, str(e)))
-
- def get_client_template_by_name(self, name, realm='master'):
- """ Obtain client template representation by name
-
- :param name: name of client template to be queried
- :param realm: client template from this realm
- :return: dict of client template representation or None if none matching exist
- """
- result = self.get_client_templates(realm)
- if isinstance(result, list):
- result = [x for x in result if x['name'] == name]
- if len(result) > 0:
- return result[0]
- return None
-
- def get_client_template_id(self, name, realm='master'):
- """ Obtain client template id by name
-
- :param name: name of client template to be queried
- :param realm: client template from this realm
- :return: client template id (usually a UUID)
- """
- result = self.get_client_template_by_name(name, realm)
- if isinstance(result, dict) and 'id' in result:
- return result['id']
- else:
- return None
-
- def update_client_template(self, id, clienttrep, realm="master"):
- """ Update an existing client template
- :param id: id (not name) of client template to be updated in Keycloak
- :param clienttrep: corresponding (partial/full) client template representation with updates
- :param realm: realm the client template is in
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def create_client_template(self, clienttrep, realm="master"):
- """ Create a client in keycloak
- :param clienttrep: Client template representation of client template to be created. Must at least contain field name
- :param realm: realm for client template to be created in
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
-
- try:
- return open_url(url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clienttrep), validate_certs=self.validate_certs)
- except Exception as e:
-            self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
-                                      % (clienttrep['name'], realm, str(e)))
-
- def delete_client_template(self, id, realm="master"):
- """ Delete a client template from Keycloak
-
- :param id: id (not name) of client to be deleted
- :param realm: realm of client template to be deleted
- :return: HTTPResponse object on success
- """
- url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def get_clientscopes(self, realm="master"):
- """ Fetch the name and ID of all clientscopes on the Keycloak server.
-
-        To fetch the full data of a clientscope, make a subsequent call to
-        get_clientscope_by_clientscopeid, passing in the ID of the clientscope you wish to return.
-
- :param realm: Realm in which the clientscope resides; default 'master'.
- :return The clientscopes of this realm (default "master")
- """
- clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(clientscopes_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s"
- % (realm, str(e)))
-
- def get_clientscope_by_clientscopeid(self, cid, realm="master"):
- """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
-
- If the clientscope does not exist, None is returned.
-
-        cid is a UUID provided by the Keycloak API
- :param cid: UUID of the clientscope to be returned
- :param realm: Realm in which the clientscope resides; default 'master'.
- """
- clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return json.loads(to_native(open_url(clientscope_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
- % (cid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s"
- % (cid, realm, str(e)))
-
- def get_clientscope_by_name(self, name, realm="master"):
- """ Fetch a keycloak clientscope within a realm based on its name.
-
- The Keycloak API does not allow filtering of the clientscopes resource by name.
- As a result, this method first retrieves the entire list of clientscopes - name and ID -
-        then performs a second query to fetch the matching clientscope.
-
- If the clientscope does not exist, None is returned.
- :param name: Name of the clientscope to fetch.
- :param realm: Realm in which the clientscope resides; default 'master'
- """
- try:
- all_clientscopes = self.get_clientscopes(realm=realm)
-
- for clientscope in all_clientscopes:
- if clientscope['name'] == name:
- return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm)
-
- return None
-
- except Exception as e:
- self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
- % (name, realm, str(e)))
-
- def create_clientscope(self, clientscoperep, realm="master"):
- """ Create a Keycloak clientscope.
-
- :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name.
- :return: HTTPResponse object on success
- """
- clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
- try:
- return open_url(clientscopes_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s"
- % (clientscoperep['name'], realm, str(e)))
-
- def update_clientscope(self, clientscoperep, realm="master"):
- """ Update an existing clientscope.
-
-        :param clientscoperep: A ClientScopeRepresentation of the updated clientscope.
- :return HTTPResponse object on success
- """
- clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
-
- try:
- return open_url(clientscope_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
-
- except Exception as e:
- self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s'
- % (clientscoperep['name'], realm, str(e)))
-
- def delete_clientscope(self, name=None, cid=None, realm="master"):
- """ Delete a clientscope. One of name or cid must be provided.
-
- Providing the clientscope ID is preferred as it avoids a second lookup to
- convert a clientscope name to an ID.
-
- :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
- :param cid: The ID of the clientscope (preferred to name).
-        :param realm: The realm in which this clientscope resides, default "master".
- """
-
- if cid is None and name is None:
- # prefer an exception since this is almost certainly a programming error in the module itself.
- raise Exception("Unable to delete group - one of group ID or name must be provided.")
-
- # only lookup the name if cid isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
- # less lookup.
- if cid is None and name is not None:
- for clientscope in self.get_clientscopes(realm=realm):
- if clientscope['name'] == name:
- cid = clientscope['id']
- break
-
-        # if the clientscope doesn't exist - no problem, nothing to delete.
- if cid is None:
- return None
-
- # should have a good cid by here.
- clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl)
- try:
- return open_url(clientscope_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
-
- except Exception as e:
- self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
-
- def get_clientscope_protocolmappers(self, cid, realm="master"):
- """ Fetch the name and ID of all clientscopes on the Keycloak server.
-
- To fetch the full data of the group, make a subsequent call to
- get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return.
-
- :param cid: id of clientscope (not name).
- :param realm: Realm in which the clientscope resides; default 'master'.
-        :return The protocolmappers of this clientscope.
- """
- protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(protocolmappers_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s"
- % (realm, str(e)))
-
- def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
- """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
-
- If the clientscope does not exist, None is returned.
-
- gid is a UUID provided by the Keycloak API
-
- :param cid: UUID of the protocolmapper to be returned
- :param cid: UUID of the clientscope to be returned
- :param realm: Realm in which the clientscope resides; default 'master'.
- """
- protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid)
- try:
- return json.loads(to_native(open_url(protocolmapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
- % (pid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
- % (cid, realm, str(e)))
-
- def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"):
- """ Fetch a keycloak clientscope within a realm based on its name.
-
- The Keycloak API does not allow filtering of the clientscopes resource by name.
- As a result, this method first retrieves the entire list of clientscopes - name and ID -
- then performs a second query to fetch the group.
-
- If the clientscope does not exist, None is returned.
- :param cid: Id of the clientscope (not name).
- :param name: Name of the protocolmapper to fetch.
- :param realm: Realm in which the clientscope resides; default 'master'
- """
- try:
- all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm)
-
- for protocolmapper in all_protocolmappers:
- if protocolmapper['name'] == name:
- return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm)
-
- return None
-
- except Exception as e:
- self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
- % (name, realm, str(e)))
-
- def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"):
- """ Create a Keycloak clientscope protocolmapper.
-
- :param cid: Id of the clientscope.
- :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name.
- :return: HTTPResponse object on success
- """
- protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
- try:
- return open_url(protocolmappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s"
- % (mapper_rep['name'], realm, str(e)))
-
- def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
- """ Update an existing clientscope.
-
- :param cid: Id of the clientscope.
- :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper.
- :return HTTPResponse object on success
- """
- protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])
-
- try:
- return open_url(protocolmapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
-
- except Exception as e:
- self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
- % (mapper_rep, realm, str(e)))
-
- def get_groups(self, realm="master"):
- """ Fetch the name and ID of all groups on the Keycloak server.
-
- To fetch the full data of the group, make a subsequent call to
- get_group_by_groupid, passing in the ID of the group you wish to return.
-
- :param realm: Return the groups of this realm (default "master").
- """
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
- % (realm, str(e)))
-
- def get_group_by_groupid(self, gid, realm="master"):
- """ Fetch a keycloak group from the provided realm using the group's unique ID.
-
- If the group does not exist, None is returned.
-
- gid is a UUID provided by the Keycloak API
- :param gid: UUID of the group to be returned
- :param realm: Realm in which the group resides; default 'master'.
- """
- groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
- try:
- return json.loads(to_native(open_url(groups_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
-
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (gid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (gid, realm, str(e)))
-
- def get_group_by_name(self, name, realm="master"):
- """ Fetch a keycloak group within a realm based on its name.
-
- The Keycloak API does not allow filtering of the Groups resource by name.
- As a result, this method first retrieves the entire list of groups - name and ID -
- then performs a second query to fetch the group.
-
- If the group does not exist, None is returned.
- :param name: Name of the group to fetch.
- :param realm: Realm in which the group resides; default 'master'
- """
- try:
- all_groups = self.get_groups(realm=realm)
-
- for group in all_groups:
- if group['name'] == name:
- return self.get_group_by_groupid(group['id'], realm=realm)
-
- return None
-
- except Exception as e:
- self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
- % (name, realm, str(e)))
-
- def create_group(self, grouprep, realm="master"):
- """ Create a Keycloak group.
-
- :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
- :return: HTTPResponse object on success
- """
- groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
- try:
- return open_url(groups_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg="Could not create group %s in realm %s: %s"
- % (grouprep['name'], realm, str(e)))
-
- def update_group(self, grouprep, realm="master"):
- """ Update an existing group.
-
- :param grouprep: A GroupRepresentation of the updated group.
- :return HTTPResponse object on success
- """
- group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
-
- try:
- return open_url(group_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(grouprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update group %s in realm %s: %s'
- % (grouprep['name'], realm, str(e)))
-
- def delete_group(self, name=None, groupid=None, realm="master"):
- """ Delete a group. One of name or groupid must be provided.
-
- Providing the group ID is preferred as it avoids a second lookup to
- convert a group name to an ID.
-
- :param name: The name of the group. A lookup will be performed to retrieve the group ID.
- :param groupid: The ID of the group (preferred to name).
- :param realm: The realm in which this group resides, default "master".
- """
-
- if groupid is None and name is None:
- # prefer an exception since this is almost certainly a programming error in the module itself.
- raise Exception("Unable to delete group - one of group ID or name must be provided.")
-
- # only lookup the name if groupid isn't provided.
- # in the case that both are provided, prefer the ID, since it's one
- # less lookup.
- if groupid is None and name is not None:
- for group in self.get_groups(realm=realm):
- if group['name'] == name:
- groupid = group['id']
- break
-
- # if the group doesn't exist - no problem, nothing to delete.
- if groupid is None:
- return None
-
- # should have a good groupid by here.
- group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
- try:
- return open_url(group_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
-
- def get_realm_roles(self, realm='master'):
- """ Obtains role representations for roles in a realm
-
- :param realm: realm to be queried
- :return: list of dicts of role representations
- """
- rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s'
- % (realm, str(e)))
-
- def get_realm_role(self, name, realm='master'):
- """ Fetch a keycloak role from the provided realm using the role's name.
-
- If the role does not exist, None is returned.
- :param name: Name of the role to fetch.
- :param realm: Realm in which the role resides; default 'master'.
- """
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
- try:
- return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
- % (name, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
- % (name, realm, str(e)))
-
- def create_realm_role(self, rolerep, realm='master'):
- """ Create a Keycloak realm role.
-
- :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name.
- :return: HTTPResponse object on success
- """
- roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
- try:
- return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create role %s in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
-
- def update_realm_role(self, rolerep, realm='master'):
- """ Update an existing realm role.
-
- :param rolerep: A RoleRepresentation of the updated role.
- :return HTTPResponse object on success
- """
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
- try:
- return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update role %s in realm %s: %s'
- % (rolerep['name'], realm, str(e)))
-
- def delete_realm_role(self, name, realm='master'):
- """ Delete a realm role.
-
- :param name: The name of the role.
- :param realm: The realm in which this role resides, default "master".
- """
- role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
- try:
- return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete role %s in realm %s: %s'
- % (name, realm, str(e)))
-
- def get_client_roles(self, clientid, realm='master'):
- """ Obtains role representations for client roles in a specific client
-
- :param clientid: Client id to be queried
- :param realm: Realm to be queried
- :return: List of dicts of role representations
- """
- cid = self.get_client_id(clientid, realm=realm)
- if cid is None:
- self.module.fail_json(msg='Could not find client %s in realm %s'
- % (clientid, realm))
- rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return json.loads(to_native(open_url(rolelist_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s'
- % (clientid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s'
- % (clientid, realm, str(e)))
-
- def get_client_role(self, name, clientid, realm='master'):
- """ Fetch a keycloak client role from the provided realm using the role's name.
-
- :param name: Name of the role to fetch.
- :param clientid: Client id for the client role
- :param realm: Realm in which the role resides
- :return: Dict of role representation
- If the role does not exist, None is returned.
- """
- cid = self.get_client_id(clientid, realm=realm)
- if cid is None:
- self.module.fail_json(msg='Could not find client %s in realm %s'
- % (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
- try:
- return json.loads(to_native(open_url(role_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s'
- % (name, clientid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s'
- % (name, clientid, realm, str(e)))
-
- def create_client_role(self, rolerep, clientid, realm='master'):
- """ Create a Keycloak client role.
-
- :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name.
- :param clientid: Client id for the client role
- :param realm: Realm in which the role resides
- :return: HTTPResponse object on success
- """
- cid = self.get_client_id(clientid, realm=realm)
- if cid is None:
- self.module.fail_json(msg='Could not find client %s in realm %s'
- % (clientid, realm))
- roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return open_url(roles_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s'
- % (rolerep['name'], clientid, realm, str(e)))
-
- def update_client_role(self, rolerep, clientid, realm="master"):
- """ Update an existing client role.
-
- :param rolerep: A RoleRepresentation of the updated role.
- :param clientid: Client id for the client role
- :param realm: Realm in which the role resides
- :return HTTPResponse object on success
- """
- cid = self.get_client_id(clientid, realm=realm)
- if cid is None:
- self.module.fail_json(msg='Could not find client %s in realm %s'
- % (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
- try:
- return open_url(role_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(rolerep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s'
- % (rolerep['name'], clientid, realm, str(e)))
-
- def delete_client_role(self, name, clientid, realm="master"):
- """ Delete a role. One of name or roleid must be provided.
-
- :param name: The name of the role.
- :param clientid: Client id for the client role
- :param realm: Realm in which the role resides
- """
- cid = self.get_client_id(clientid, realm=realm)
- if cid is None:
- self.module.fail_json(msg='Could not find client %s in realm %s'
- % (clientid, realm))
- role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
- try:
- return open_url(role_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s'
- % (name, clientid, realm, str(e)))
-
- def get_authentication_flow_by_alias(self, alias, realm='master'):
- """
-        Get an authentication flow by its alias
- :param alias: Alias of the authentication flow to get.
- :param realm: Realm.
- :return: Authentication flow representation.
- """
- try:
- authentication_flow = {}
-            # Check if the authentication flow exists on the Keycloak server
- authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET',
- headers=self.restheaders, timeout=self.connection_timeout))
- for authentication in authentications:
- if authentication["alias"] == alias:
- authentication_flow = authentication
- break
- return authentication_flow
- except Exception as e:
- self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e)))
-
- def delete_authentication_flow_by_id(self, id, realm='master'):
- """
- Delete an authentication flow from Keycloak
- :param id: id of authentication flow to be deleted
- :param realm: realm of client to be deleted
- :return: HTTPResponse object on success
- """
- flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)
-
- try:
- return open_url(flow_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s'
- % (id, realm, str(e)))
-
- def copy_auth_flow(self, config, realm='master'):
- """
- Create a new authentication flow from a copy of another.
- :param config: Representation of the authentication flow to create.
- :param realm: Realm.
- :return: Representation of the new authentication flow.
- """
- try:
- new_name = dict(
- newName=config["alias"]
- )
- open_url(
- URL_AUTHENTICATION_FLOW_COPY.format(
- url=self.baseurl,
- realm=realm,
- copyfrom=quote(config["copyFrom"])),
- method='POST',
- headers=self.restheaders,
- data=json.dumps(new_name),
- timeout=self.connection_timeout)
- flow_list = json.load(
- open_url(
- URL_AUTHENTICATION_FLOWS.format(url=self.baseurl,
- realm=realm),
- method='GET',
- headers=self.restheaders,
- timeout=self.connection_timeout))
- for flow in flow_list:
- if flow["alias"] == config["alias"]:
- return flow
- return None
- except Exception as e:
- self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
-
- def create_empty_auth_flow(self, config, realm='master'):
- """
- Create a new empty authentication flow.
- :param config: Representation of the authentication flow to create.
- :param realm: Realm.
- :return: Representation of the new authentication flow.
- """
- try:
- new_flow = dict(
- alias=config["alias"],
- providerId=config["providerId"],
- description=config["description"],
- topLevel=True
- )
- open_url(
- URL_AUTHENTICATION_FLOWS.format(
- url=self.baseurl,
- realm=realm),
- method='POST',
- headers=self.restheaders,
- data=json.dumps(new_flow),
- timeout=self.connection_timeout)
- flow_list = json.load(
- open_url(
- URL_AUTHENTICATION_FLOWS.format(
- url=self.baseurl,
- realm=realm),
- method='GET',
- headers=self.restheaders,
- timeout=self.connection_timeout))
- for flow in flow_list:
- if flow["alias"] == config["alias"]:
- return flow
- return None
- except Exception as e:
- self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
-
- def update_authentication_executions(self, flowAlias, updatedExec, realm='master'):
- """ Update authentication executions
-
- :param flowAlias: name of the parent flow
- :param updatedExec: JSON containing updated execution
- :return: HTTPResponse object on success
- """
- try:
- open_url(
- URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
- url=self.baseurl,
- realm=realm,
- flowalias=quote(flowAlias)),
- method='PUT',
- headers=self.restheaders,
- data=json.dumps(updatedExec),
- timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))
-
- def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'):
- """ Add autenticatorConfig to the execution
-
- :param executionId: id of execution
- :param authenticationConfig: config to add to the execution
- :return: HTTPResponse object on success
- """
- try:
- open_url(
- URL_AUTHENTICATION_EXECUTION_CONFIG.format(
- url=self.baseurl,
- realm=realm,
- id=executionId),
- method='POST',
- headers=self.restheaders,
- data=json.dumps(authenticationConfig),
- timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
-
- def create_subflow(self, subflowName, flowAlias, realm='master'):
- """ Create new sublow on the flow
-
- :param subflowName: name of the subflow to create
- :param flowAlias: name of the parent flow
- :return: HTTPResponse object on success
- """
- try:
- newSubFlow = {}
- newSubFlow["alias"] = subflowName
- newSubFlow["provider"] = "registration-page-form"
- newSubFlow["type"] = "basic-flow"
- open_url(
- URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
- url=self.baseurl,
- realm=realm,
- flowalias=quote(flowAlias)),
- method='POST',
- headers=self.restheaders,
- data=json.dumps(newSubFlow),
- timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
-
- def create_execution(self, execution, flowAlias, realm='master'):
- """ Create new execution on the flow
-
- :param execution: representation of the execution to create
- :param flowAlias: name of the parent flow
- :return: HTTPResponse object on success
- """
- try:
- newExec = {}
- newExec["provider"] = execution["providerId"]
- newExec["requirement"] = execution["requirement"]
- open_url(
- URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format(
- url=self.baseurl,
- realm=realm,
- flowalias=quote(flowAlias)),
- method='POST',
- headers=self.restheaders,
- data=json.dumps(newExec),
- timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e)))
-
- def change_execution_priority(self, executionId, diff, realm='master'):
- """ Raise or lower execution priority of diff time
-
- :param executionId: id of execution to lower priority
- :param realm: realm the client is in
- :param diff: Integer number, raise of diff time if positive lower of diff time if negative
- :return: HTTPResponse object on success
- """
- try:
- if diff > 0:
- for i in range(diff):
- open_url(
- URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format(
- url=self.baseurl,
- realm=realm,
- id=executionId),
- method='POST',
- headers=self.restheaders,
- timeout=self.connection_timeout)
- elif diff < 0:
- for i in range(-diff):
- open_url(
- URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format(
- url=self.baseurl,
- realm=realm,
- id=executionId),
- method='POST',
- headers=self.restheaders,
- timeout=self.connection_timeout)
- except Exception as e:
- self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
-
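# The raise/lower endpoints move an execution by exactly one position per POST,
# which is why change_execution_priority() loops abs(diff) times. The same
# arithmetic on a plain list, as an illustration (the data is hypothetical):
def move_execution(executions, execution_id, diff):
    # Lower index means higher priority; positive diff raises, negative lowers.
    idx = executions.index(execution_id)
    new_idx = max(0, min(len(executions) - 1, idx - diff))
    executions.insert(new_idx, executions.pop(idx))
    return executions

# move_execution(['a', 'b', 'c'], 'c', 2) -> ['c', 'a', 'b']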
- def get_executions_representation(self, config, realm='master'):
- """
- Get a representation of the executions for an authentication flow.
- :param config: Representation of the authentication flow
- :param realm: Realm
- :return: Representation of the executions
- """
- try:
- # Get executions created
- executions = json.load(
- open_url(
- URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
- url=self.baseurl,
- realm=realm,
- flowalias=quote(config["alias"])),
- method='GET',
- headers=self.restheaders,
- timeout=self.connection_timeout))
- for execution in executions:
- if "authenticationConfig" in execution:
- execConfigId = execution["authenticationConfig"]
- execConfig = json.load(
- open_url(
- URL_AUTHENTICATION_CONFIG.format(
- url=self.baseurl,
- realm=realm,
- id=execConfigId),
- method='GET',
- headers=self.restheaders,
- timeout=self.connection_timeout))
- execution["authenticationConfig"] = execConfig
- return executions
- except Exception as e:
- self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s'
- % (config["alias"], realm, str(e)))
-
- def get_identity_providers(self, realm='master'):
- """ Fetch representations for identity providers in a realm
- :param realm: realm to be queried
- :return: list of representations for identity providers
- """
- idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
- try:
- return json.loads(to_native(open_url(idps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s'
- % (realm, str(e)))
-
- def get_identity_provider(self, alias, realm='master'):
- """ Fetch identity provider representation from a realm using the idp's alias.
- If the identity provider does not exist, None is returned.
- :param alias: Alias of the identity provider to fetch.
- :param realm: Realm in which the identity provider resides; default 'master'.
- """
- idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return json.loads(to_native(open_url(idp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
-
- def create_identity_provider(self, idprep, realm='master'):
- """ Create an identity provider.
- :param idprep: Identity provider representation of the idp to be created.
- :param realm: Realm in which this identity provider resides, default "master".
- :return: HTTPResponse object on success
- """
- idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
- try:
- return open_url(idps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(idprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s'
- % (idprep['alias'], realm, str(e)))
-
- def update_identity_provider(self, idprep, realm='master'):
- """ Update an existing identity provider.
- :param idprep: Identity provider representation of the idp to be updated.
- :param realm: Realm in which this identity provider resides, default "master".
- :return HTTPResponse object on success
- """
- idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias'])
- try:
- return open_url(idp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(idprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s'
- % (idprep['alias'], realm, str(e)))
-
- def delete_identity_provider(self, alias, realm='master'):
- """ Delete an identity provider.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- """
- idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return open_url(idp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s'
- % (alias, realm, str(e)))
-
- def get_identity_provider_mappers(self, alias, realm='master'):
- """ Fetch representations for identity provider mappers
- :param alias: Alias of the identity provider.
- :param realm: realm to be queried
- :return: list of representations for identity provider mappers
- """
- mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return json.loads(to_native(open_url(mappers_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
- % (alias, realm, str(e)))
-
- def get_identity_provider_mapper(self, mid, alias, realm='master'):
- """ Fetch identity provider representation from a realm using the idp's alias.
- If the identity provider does not exist, None is returned.
- :param mid: Unique ID of the mapper to fetch.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which the identity provider resides; default 'master'.
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
- try:
- return json.loads(to_native(open_url(mapper_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
-
- def create_identity_provider_mapper(self, mapper, alias, realm='master'):
- """ Create an identity provider mapper.
- :param mapper: IdentityProviderMapperRepresentation of the mapper to be created.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- :return: HTTPResponse object on success
- """
- mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
- try:
- return open_url(mappers_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
- % (mapper['name'], alias, realm, str(e)))
-
- def update_identity_provider_mapper(self, mapper, alias, realm='master'):
- """ Update an existing identity provider.
- :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- :return HTTPResponse object on success
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id'])
- try:
- return open_url(mapper_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(mapper), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s'
- % (mapper['id'], alias, realm, str(e)))
-
- def delete_identity_provider_mapper(self, mid, alias, realm='master'):
- """ Delete an identity provider.
- :param mid: Unique ID of the mapper to delete.
- :param alias: Alias of the identity provider.
- :param realm: Realm in which this identity provider resides, default "master".
- """
- mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
- try:
- return open_url(mapper_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
- % (mid, alias, realm, str(e)))
-
- def get_components(self, filter=None, realm='master'):
- """ Fetch representations for components in a realm
- :param realm: realm to be queried
- :param filter: search filter
- :return: list of representations for components
- """
- comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
- if filter is not None:
- comps_url += '?%s' % filter
-
- try:
- return json.loads(to_native(open_url(comps_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except ValueError as e:
- self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
- % (realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
- % (realm, str(e)))
-
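# get_components() appends `filter` verbatim as a URL query string. A small
# sketch that builds the filter safely; the `type` query parameter follows the
# Keycloak components API, and `kc` is a hypothetical client instance.
from ansible.module_utils.six.moves.urllib.parse import urlencode

def list_components_of_type(kc, comp_type, realm='master'):
    # urlencode escapes the query value before it is glued onto the URL.
    return kc.get_components(filter=urlencode({'type': comp_type}), realm=realm)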
- def get_component(self, cid, realm='master'):
- """ Fetch component representation from a realm using its cid.
- If the component does not exist, None is returned.
- :param cid: Unique ID of the component to fetch.
- :param realm: Realm in which the component resides; default 'master'.
- """
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except HTTPError as e:
- if e.code == 404:
- return None
- else:
- self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
- % (cid, realm, str(e)))
- except Exception as e:
- self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
- % (cid, realm, str(e)))
-
- def create_component(self, comprep, realm='master'):
- """ Create an component.
- :param comprep: Component representation of the component to be created.
- :param realm: Realm in which this component resides, default "master".
- :return: Component representation of the created component
- """
- comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
- try:
- resp = open_url(comps_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
- comp_url = resp.getheader('Location')
- if comp_url is None:
- self.module.fail_json(msg='Could not create component in realm %s: %s'
- % (realm, 'unexpected response'))
- return json.loads(to_native(open_url(comp_url, method="GET", headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs).read()))
- except Exception as e:
- self.module.fail_json(msg='Could not create component in realm %s: %s'
- % (realm, str(e)))
-
- def update_component(self, comprep, realm='master'):
- """ Update an existing component.
- :param comprep: Component representation of the component to be updated.
- :param realm: Realm in which this component resides, default "master".
- :return HTTPResponse object on success
- """
- cid = comprep.get('id')
- if cid is None:
- self.module.fail_json(msg='Cannot update component without id')
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return open_url(comp_url, method='PUT', headers=self.restheaders, timeout=self.connection_timeout,
- data=json.dumps(comprep), validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Could not update component %s in realm %s: %s'
- % (cid, realm, str(e)))
-
- def delete_component(self, cid, realm='master'):
- """ Delete an component.
- :param cid: Unique ID of the component.
- :param realm: Realm in which this component resides, default "master".
- """
- comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
- try:
- return open_url(comp_url, method='DELETE', headers=self.restheaders, timeout=self.connection_timeout,
- validate_certs=self.validate_certs)
- except Exception as e:
- self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
- % (cid, realm, str(e)))
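# The component helpers compose into the usual idempotent create-or-update
# pattern. A sketch under the assumption that a component's 'name' identifies
# it within the realm and that `kc` is an instance of this client class:
def ensure_component(kc, comprep, realm='master'):
    existing = kc.get_components(filter='name=%s' % comprep['name'], realm=realm)
    if not existing:
        # create_component() re-fetches the Location header, so the full
        # server-side representation (including the generated id) comes back.
        return kc.create_component(comprep, realm=realm)
    comprep['id'] = existing[0]['id']
    kc.update_component(comprep, realm=realm)
    return kc.get_component(comprep['id'], realm=realm)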
diff --git a/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
deleted file mode 100644
index 04b08ae5..00000000
--- a/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
-
-
-class iLORedfishUtils(RedfishUtils):
-
- def get_ilo_sessions(self):
- result = {}
- # Listing all sessions has historically been slower than other operations.
- session_list = []
- sessions_results = []
- # Get these entries, but does not fail if not found
- properties = ['Description', 'Id', 'Name', 'UserName']
-
- # Use a hardcoded sessions path instead of self.sessions_uri.
- response = self.get_request(
- self.root_uri + self.service_root + "SessionService/Sessions/")
- if not response['ret']:
- return response
- result['ret'] = True
- data = response['data']
-
- current_session = None
- if 'Oem' in data:
- if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
- current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
-
- for sessions in data[u'Members']:
- # session_list[] are URIs
- session_list.append(sessions[u'@odata.id'])
- # for each session, get details
- for uri in session_list:
- session = {}
- if uri != current_session:
- response = self.get_request(self.root_uri + uri)
- if not response['ret']:
- return response
- data = response['data']
- for prop in properties:
- if prop in data:
- session[prop] = data[prop]
- sessions_results.append(session)
- result["msg"] = sessions_results
- result["ret"] = True
- return result
-
- def set_ntp_server(self, mgr_attributes):
- result = {}
- setkey = mgr_attributes['mgr_attr_name']
-
- nic_info = self.get_manager_ethernet_uri()
- ethuri = nic_info["nic_addr"]
-
- response = self.get_request(self.root_uri + ethuri)
- if not response['ret']:
- return response
- result['ret'] = True
- data = response['data']
- payload = {"DHCPv4": {
- "UseNTPServers": ""
- }}
-
- if data["DHCPv4"]["UseNTPServers"]:
- payload["DHCPv4"]["UseNTPServers"] = False
- res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
- if not res_dhv4['ret']:
- return res_dhv4
-
- payload = {"DHCPv6": {
- "UseNTPServers": ""
- }}
-
- if data["DHCPv6"]["UseNTPServers"]:
- payload["DHCPv6"]["UseNTPServers"] = False
- res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
- if not res_dhv6['ret']:
- return res_dhv6
-
- datetime_uri = self.manager_uri + "DateTime"
-
- response = self.get_request(self.root_uri + datetime_uri)
- if not response['ret']:
- return response
-
- data = response['data']
-
- ntp_list = data[setkey]
- if len(ntp_list) == 2:
- ntp_list.pop(0)
-
- ntp_list.append(mgr_attributes['mgr_attr_value'])
-
- payload = {setkey: ntp_list}
-
- response1 = self.patch_request(self.root_uri + datetime_uri, payload)
- if not response1['ret']:
- return response1
-
- return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}
-
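# set_ntp_server() keeps at most two NTP entries, dropping the oldest before
# appending the new value. The same bounded-list rotation, isolated and
# generalized to any capacity:
def rotate_ntp_servers(ntp_list, new_server, max_entries=2):
    servers = list(ntp_list)
    if len(servers) >= max_entries:
        servers.pop(0)  # drop the oldest so the list never exceeds max_entries
    servers.append(new_server)
    return servers

# rotate_ntp_servers(['10.0.0.1', '10.0.0.2'], '10.0.0.3') -> ['10.0.0.2', '10.0.0.3']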
- def set_time_zone(self, attr):
- key = attr['mgr_attr_name']
-
- uri = self.manager_uri + "DateTime/"
- response = self.get_request(self.root_uri + uri)
- if not response['ret']:
- return response
-
- data = response["data"]
-
- if key not in data:
- return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}
-
- timezones = data["TimeZoneList"]
- index = ""
- for tz in timezones:
- if attr['mgr_attr_value'] in tz["Name"]:
- index = tz["Index"]
- break
-
- payload = {key: {"Index": index}}
- response = self.patch_request(self.root_uri + uri, payload)
- if not response['ret']:
- return response
-
- return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
-
- def set_dns_server(self, attr):
- key = attr['mgr_attr_name']
- nic_info = self.get_manager_ethernet_uri()
- uri = nic_info["nic_addr"]
-
- response = self.get_request(self.root_uri + uri)
- if not response['ret']:
- return response
-
- data = response['data']
-
- dns_list = data["Oem"]["Hpe"]["IPv4"][key]
-
- if len(dns_list) == 3:
- dns_list.pop(0)
-
- dns_list.append(attr['mgr_attr_value'])
-
- payload = {
- "Oem": {
- "Hpe": {
- "IPv4": {
- key: dns_list
- }
- }
- }
- }
-
- response = self.patch_request(self.root_uri + uri, payload)
- if not response['ret']:
- return response
-
- return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
-
- def set_domain_name(self, attr):
- key = attr['mgr_attr_name']
-
- nic_info = self.get_manager_ethernet_uri()
- ethuri = nic_info["nic_addr"]
-
- response = self.get_request(self.root_uri + ethuri)
- if not response['ret']:
- return response
-
- data = response['data']
-
- payload = {"DHCPv4": {
- "UseDomainName": ""
- }}
-
- if data["DHCPv4"]["UseDomainName"]:
- payload["DHCPv4"]["UseDomainName"] = False
- res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
- if not res_dhv4['ret']:
- return res_dhv4
-
- payload = {"DHCPv6": {
- "UseDomainName": ""
- }}
-
- if data["DHCPv6"]["UseDomainName"]:
- payload["DHCPv6"]["UseDomainName"] = False
- res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
- if not res_dhv6['ret']:
- return res_dhv6
-
- domain_name = attr['mgr_attr_value']
-
- payload = {"Oem": {
- "Hpe": {
- key: domain_name
- }
- }}
-
- response = self.patch_request(self.root_uri + ethuri, payload)
- if not response['ret']:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
-
- def set_wins_registration(self, mgrattr):
- key = mgrattr['mgr_attr_name']
-
- nic_info = self.get_manager_ethernet_uri()
- ethuri = nic_info["nic_addr"]
-
- payload = {
- "Oem": {
- "Hpe": {
- "IPv4": {
- key: False
- }
- }
- }
- }
-
- response = self.patch_request(self.root_uri + ethuri, payload)
- if not response['ret']:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
diff --git a/ansible_collections/community/general/plugins/module_utils/influxdb.py b/ansible_collections/community/general/plugins/module_utils/influxdb.py
deleted file mode 100644
index c171131a..00000000
--- a/ansible_collections/community/general/plugins/module_utils/influxdb.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-
-from ansible.module_utils.basic import missing_required_lib
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests.exceptions
- HAS_REQUESTS = True
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- HAS_REQUESTS = False
-
-INFLUXDB_IMP_ERR = None
-try:
- from influxdb import InfluxDBClient
- from influxdb import __version__ as influxdb_version
- from influxdb import exceptions
- HAS_INFLUXDB = True
-except ImportError:
- INFLUXDB_IMP_ERR = traceback.format_exc()
- HAS_INFLUXDB = False
-
-
-class InfluxDb(object):
- def __init__(self, module):
- self.module = module
- self.params = self.module.params
- self.check_lib()
- self.hostname = self.params['hostname']
- self.port = self.params['port']
- self.path = self.params['path']
- self.username = self.params['username']
- self.password = self.params['password']
- self.database_name = self.params.get('database_name')
-
- def check_lib(self):
- if not HAS_REQUESTS:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
-
- if not HAS_INFLUXDB:
- self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
-
- @staticmethod
- def influxdb_argument_spec():
- return dict(
- hostname=dict(type='str', default='localhost'),
- port=dict(type='int', default=8086),
- path=dict(type='str', default=''),
- username=dict(type='str', default='root', aliases=['login_username']),
- password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
- ssl=dict(type='bool', default=False),
- validate_certs=dict(type='bool', default=True),
- timeout=dict(type='int'),
- retries=dict(type='int', default=3),
- proxies=dict(type='dict', default={}),
- use_udp=dict(type='bool', default=False),
- udp_port=dict(type='int', default=4444),
- )
-
- def connect_to_influxdb(self):
- args = dict(
- host=self.hostname,
- port=self.port,
- username=self.username,
- password=self.password,
- database=self.database_name,
- ssl=self.params['ssl'],
- verify_ssl=self.params['validate_certs'],
- timeout=self.params['timeout'],
- use_udp=self.params['use_udp'],
- udp_port=self.params['udp_port'],
- proxies=self.params['proxies'],
- )
- influxdb_api_version = LooseVersion(influxdb_version)
- if influxdb_api_version >= LooseVersion('4.1.0'):
- # retries option is added in version 4.1.0
- args.update(retries=self.params['retries'])
-
- if influxdb_api_version >= LooseVersion('5.1.0'):
- # path argument is added in version 5.1.0
- args.update(path=self.path)
-
- return InfluxDBClient(**args)
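# A minimal sketch of how a module consumes this helper: build the shared
# argument spec, wrap it in AnsibleModule, and connect. client.ping() (which
# returns the server version string) is only an illustrative call.
from ansible.module_utils.basic import AnsibleModule

def main():
    module = AnsibleModule(
        argument_spec=InfluxDb.influxdb_argument_spec(),
        supports_check_mode=True,
    )
    client = InfluxDb(module).connect_to_influxdb()
    module.exit_json(changed=False, influxdb_version=client.ping())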
diff --git a/ansible_collections/community/general/plugins/module_utils/ipa.py b/ansible_collections/community/general/plugins/module_utils/ipa.py
deleted file mode 100644
index 3d8c2580..00000000
--- a/ansible_collections/community/general/plugins/module_utils/ipa.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import os
-import socket
-import uuid
-
-import re
-from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
-from ansible.module_utils.six import PY3
-from ansible.module_utils.six.moves.urllib.parse import quote
-from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
-from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
-
-
-def _env_then_dns_fallback(*args, **kwargs):
- ''' Load value from environment or DNS in that order'''
- try:
- result = env_fallback(*args, **kwargs)
- if result == '':
- raise AnsibleFallbackNotFound
- return result
- except AnsibleFallbackNotFound:
- # If no host was given, we try to guess it from IPA.
- # The ipa-ca entry is a standard entry that IPA will have set for
- # the CA.
- try:
- return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
- except Exception:
- raise AnsibleFallbackNotFound
-
-
-class IPAClient(object):
- def __init__(self, module, host, port, protocol):
- self.host = host
- self.port = port
- self.protocol = protocol
- self.module = module
- self.headers = None
- self.timeout = module.params.get('ipa_timeout')
- self.use_gssapi = False
-
- def get_base_url(self):
- return '%s://%s/ipa' % (self.protocol, self.host)
-
- def get_json_url(self):
- return '%s/session/json' % self.get_base_url()
-
- def login(self, username, password):
- if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
- self.use_gssapi = True
- elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
- ccache = "MEMORY:" + str(uuid.uuid4())
- os.environ['KRB5CCNAME'] = ccache
- self.use_gssapi = True
- else:
- if not password:
- if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
- self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
- self._fail('login', 'Password is required if not using '
- 'GSSAPI. To use GSSAPI, please set the '
- 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
- 'environment variables.')
- url = '%s/session/login_password' % self.get_base_url()
- data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
- headers = {'referer': self.get_base_url(),
- 'Content-Type': 'application/x-www-form-urlencoded',
- 'Accept': 'text/plain'}
- try:
- resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
- status_code = info['status']
- if status_code not in [200, 201, 204]:
- self._fail('login', info['msg'])
-
- self.headers = {'Cookie': info.get('set-cookie')}
- except Exception as e:
- self._fail('login', to_native(e))
- if not self.headers:
- self.headers = dict()
- self.headers.update({
- 'referer': self.get_base_url(),
- 'Content-Type': 'application/json',
- 'Accept': 'application/json'})
-
- def _fail(self, msg, e):
- if 'message' in e:
- err_string = e.get('message')
- else:
- err_string = e
- self.module.fail_json(msg='%s: %s' % (msg, err_string))
-
- def get_ipa_version(self):
- response = self.ping()['summary']
- ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
- version_match = ipa_ver_regex.match(response)
- ipa_version = None
- if version_match:
- ipa_version = version_match.groups()[0]
- return ipa_version
-
- def ping(self):
- return self._post_json(method='ping', name=None)
-
- def _post_json(self, method, name, item=None):
- if item is None:
- item = {}
- url = '%s/session/json' % self.get_base_url()
- data = dict(method=method)
-
- # TODO: We should probably handle this a little better.
- if method in ('ping', 'config_show', 'otpconfig_show'):
- data['params'] = [[], {}]
- elif method in ('config_mod', 'otpconfig_mod'):
- data['params'] = [[], item]
- else:
- data['params'] = [[name], item]
-
- try:
- resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
- headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
- status_code = info['status']
- if status_code not in [200, 201, 204]:
- self._fail(method, info['msg'])
- except Exception as e:
- self._fail('post %s' % method, to_native(e))
-
- if PY3:
- charset = resp.headers.get_content_charset('latin-1')
- else:
- response_charset = resp.headers.getparam('charset')
- if response_charset:
- charset = response_charset
- else:
- charset = 'latin-1'
- resp = json.loads(to_text(resp.read(), encoding=charset))
- err = resp.get('error')
- if err is not None:
- self._fail('response %s' % method, err)
-
- if 'result' in resp:
- result = resp.get('result')
- if 'result' in result:
- result = result.get('result')
- if isinstance(result, list):
- if len(result) > 0:
- return result[0]
- else:
- return {}
- return result
- return None
-
- def get_diff(self, ipa_data, module_data):
- result = []
- for key in module_data.keys():
- mod_value = module_data.get(key, None)
- if isinstance(mod_value, list):
- default = []
- else:
- default = None
- ipa_value = ipa_data.get(key, default)
- if isinstance(ipa_value, list) and not isinstance(mod_value, list):
- mod_value = [mod_value]
- if isinstance(ipa_value, list) and isinstance(mod_value, list):
- mod_value = sorted(mod_value)
- ipa_value = sorted(ipa_value)
- if mod_value != ipa_value:
- result.append(key)
- return result
-
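# A worked example of the get_diff() rules above (all values are made up):
# scalars on the module side are wrapped into lists whenever the IPA attribute
# is list-valued, and lists are compared order-insensitively.
ipa_data = {'cn': ['Alice'], 'memberof': ['g2', 'g1']}
module_data = {'cn': 'Alice', 'memberof': ['g1', 'g2'], 'title': 'admin'}
# 'cn' matches after scalar-to-list normalization and 'memberof' matches after
# sorting, so only 'title' (absent on the IPA side) is reported:
# client.get_diff(ipa_data, module_data) -> ['title']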
- def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None):
- changed = False
- diff = list(set(ipa_list) - set(module_list))
- if append is not True and len(diff) > 0:
- changed = True
- if not self.module.check_mode:
- if item:
- remove_method(name=name, item={item: diff})
- else:
- remove_method(name=name, item=diff)
-
- diff = list(set(module_list) - set(ipa_list))
- if len(diff) > 0:
- changed = True
- if not self.module.check_mode:
- if item:
- add_method(name=name, item={item: diff})
- else:
- add_method(name=name, item=diff)
-
- return changed
-
-
-def ipa_argument_spec():
- return dict(
- ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
- ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
- ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
- ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
- ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
- ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
- validate_certs=dict(type='bool', default=True),
- )
diff --git a/ansible_collections/community/general/plugins/module_utils/ldap.py b/ansible_collections/community/general/plugins/module_utils/ldap.py
deleted file mode 100644
index 30dbaf76..00000000
--- a/ansible_collections/community/general/plugins/module_utils/ldap.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Peter Sagerson
-# Copyright: (c) 2016, Jiri Tyr
-# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-from ansible.module_utils.common.text.converters import to_native
-
-try:
- import ldap
- import ldap.sasl
-
- HAS_LDAP = True
-
- SASCL_CLASS = {
- 'gssapi': ldap.sasl.gssapi,
- 'external': ldap.sasl.external,
- }
-except ImportError:
- HAS_LDAP = False
-
-
-def gen_specs(**specs):
- specs.update({
- 'bind_dn': dict(),
- 'bind_pw': dict(default='', no_log=True),
- 'dn': dict(required=True),
- 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
- 'server_uri': dict(default='ldapi:///'),
- 'start_tls': dict(default=False, type='bool'),
- 'validate_certs': dict(default=True, type='bool'),
- 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
- })
-
- return specs
-
-
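# gen_specs() merges these shared LDAP connection options into a module's own
# argument spec. A sketch of the intended call pattern; the 'state' option is a
# hypothetical module-specific parameter:
from ansible.module_utils.basic import AnsibleModule

def build_module():
    return AnsibleModule(
        argument_spec=gen_specs(
            state=dict(default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )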
-class LdapGeneric(object):
- def __init__(self, module):
- # Shortcuts
- self.module = module
- self.bind_dn = self.module.params['bind_dn']
- self.bind_pw = self.module.params['bind_pw']
- self.dn = self.module.params['dn']
- self.referrals_chasing = self.module.params['referrals_chasing']
- self.server_uri = self.module.params['server_uri']
- self.start_tls = self.module.params['start_tls']
- self.verify_cert = self.module.params['validate_certs']
- self.sasl_class = self.module.params['sasl_class']
-
- # Establish connection
- self.connection = self._connect_to_ldap()
-
- def fail(self, msg, exn):
- self.module.fail_json(
- msg=msg,
- details=to_native(exn),
- exception=traceback.format_exc()
- )
-
- def _connect_to_ldap(self):
- if not self.verify_cert:
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
-
- connection = ldap.initialize(self.server_uri)
-
- if self.referrals_chasing == 'disabled':
- # Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067)
- connection.set_option(ldap.OPT_REFERRALS, 0)
-
- if self.start_tls:
- try:
- connection.start_tls_s()
- except ldap.LDAPError as e:
- self.fail("Cannot start TLS.", e)
-
- try:
- if self.bind_dn is not None:
- connection.simple_bind_s(self.bind_dn, self.bind_pw)
- else:
- klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)
- connection.sasl_interactive_bind_s('', klass())
- except ldap.LDAPError as e:
- self.fail("Cannot bind to the server.", e)
-
- return connection
diff --git a/ansible_collections/community/general/plugins/module_utils/linode.py b/ansible_collections/community/general/plugins/module_utils/linode.py
deleted file mode 100644
index 9d7c37e6..00000000
--- a/ansible_collections/community/general/plugins/module_utils/linode.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Luke Murphy @decentral1se
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-def get_user_agent(module):
- """Retrieve a user-agent to send with LinodeClient requests."""
- try:
- from ansible.module_utils.ansible_release import __version__ as ansible_version
- except ImportError:
- ansible_version = 'unknown'
- return 'Ansible-%s/%s' % (module, ansible_version)
diff --git a/ansible_collections/community/general/plugins/module_utils/lxd.py b/ansible_collections/community/general/plugins/module_utils/lxd.py
deleted file mode 100644
index e25caf11..00000000
--- a/ansible_collections/community/general/plugins/module_utils/lxd.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2016, Hiroaki Nakamura
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import socket
-import ssl
-
-from ansible.module_utils.urls import generic_urlparse
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.six.moves import http_client
-from ansible.module_utils.common.text.converters import to_text
-
-# httplib/http.client connection using unix domain socket
-HTTPConnection = http_client.HTTPConnection
-HTTPSConnection = http_client.HTTPSConnection
-
-import json
-
-
-class UnixHTTPConnection(HTTPConnection):
- def __init__(self, path):
- HTTPConnection.__init__(self, 'localhost')
- self.path = path
-
- def connect(self):
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.path)
- self.sock = sock
-
-
-class LXDClientException(Exception):
- def __init__(self, msg, **kwargs):
- self.msg = msg
- self.kwargs = kwargs
-
-
-class LXDClient(object):
- def __init__(self, url, key_file=None, cert_file=None, debug=False):
- """LXD Client.
-
- :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
- :type url: ``str``
- :param key_file: The path of the client certificate key file.
- :type key_file: ``str``
- :param cert_file: The path of the client certificate file.
- :type cert_file: ``str``
- :param debug: The debug flag. The request and response are stored in logs when debug is true.
- :type debug: ``bool``
- """
- self.url = url
- self.debug = debug
- self.logs = []
- if url.startswith('https:'):
- self.cert_file = cert_file
- self.key_file = key_file
- parts = generic_urlparse(urlparse(self.url))
- ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
- ctx.load_cert_chain(cert_file, keyfile=key_file)
- self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
- elif url.startswith('unix:'):
- unix_socket_path = url[len('unix:'):]
- self.connection = UnixHTTPConnection(unix_socket_path)
- else:
- raise LXDClientException('URL scheme must be unix: or https:')
-
- def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
- resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
- if resp_json['type'] == 'async':
- url = '{0}/wait'.format(resp_json['operation'])
- resp_json = self._send_request('GET', url)
- if wait_for_container:
- while resp_json['metadata']['status'] == 'Running':
- resp_json = self._send_request('GET', url)
- if resp_json['metadata']['status'] != 'Success':
- self._raise_err_from_json(resp_json)
- return resp_json
-
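# A minimal usage sketch: connect over the common default unix socket and hit
# GET /1.0, the LXD server-info endpoint. do() transparently waits on async
# operations as shown above, so sync and async calls look the same to callers.
client = LXDClient('unix:/var/lib/lxd/unix.socket')
server_info = client.do('GET', '/1.0')
print(server_info['metadata'].get('api_version'))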
- def authenticate(self, trust_password):
- body_json = {'type': 'client', 'password': trust_password}
- return self._send_request('POST', '/1.0/certificates', body_json=body_json)
-
- def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
- try:
- body = json.dumps(body_json)
- self.connection.request(method, url, body=body)
- resp = self.connection.getresponse()
- resp_data = resp.read()
- resp_data = to_text(resp_data, errors='surrogate_or_strict')
- resp_json = json.loads(resp_data)
- self.logs.append({
- 'type': 'sent request',
- 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
- 'response': {'json': resp_json}
- })
- resp_type = resp_json.get('type', None)
- if resp_type == 'error':
- if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
- return resp_json
- if resp_json['error'] == "Certificate already in trust store":
- return resp_json
- self._raise_err_from_json(resp_json)
- return resp_json
- except socket.error as e:
- raise LXDClientException('cannot connect to the LXD server', err=e)
-
- def _raise_err_from_json(self, resp_json):
- err_params = {}
- if self.debug:
- err_params['logs'] = self.logs
- raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
-
- @staticmethod
- def _get_err_from_resp_json(resp_json):
- err = None
- metadata = resp_json.get('metadata', None)
- if metadata is not None:
- err = metadata.get('err', None)
- if err is None:
- err = resp_json.get('error', None)
- return err
diff --git a/ansible_collections/community/general/plugins/module_utils/manageiq.py b/ansible_collections/community/general/plugins/module_utils/manageiq.py
deleted file mode 100644
index 98e5590c..00000000
--- a/ansible_collections/community/general/plugins/module_utils/manageiq.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2017, Daniel Korn
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import os
-import traceback
-
-from ansible.module_utils.basic import missing_required_lib
-
-CLIENT_IMP_ERR = None
-try:
- from manageiq_client.api import ManageIQClient
- HAS_CLIENT = True
-except ImportError:
- CLIENT_IMP_ERR = traceback.format_exc()
- HAS_CLIENT = False
-
-
-def manageiq_argument_spec():
- options = dict(
- url=dict(default=os.environ.get('MIQ_URL', None)),
- username=dict(default=os.environ.get('MIQ_USERNAME', None)),
- password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
- token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
- validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
- ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']),
- )
-
- return dict(
- manageiq_connection=dict(type='dict',
- apply_defaults=True,
- options=options),
- )
-
-
-def check_client(module):
- if not HAS_CLIENT:
- module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
-
-
-def validate_connection_params(module):
- params = module.params['manageiq_connection']
- error_str = "missing required argument: manageiq_connection[{}]"
- url = params['url']
- token = params['token']
- username = params['username']
- password = params['password']
-
- if (url and username and password) or (url and token):
- return params
- for arg in ['url', 'username', 'password']:
- if params[arg] in (None, ''):
- module.fail_json(msg=error_str.format(arg))
-
-
-def manageiq_entities():
- return {
- 'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
- 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
- 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
- 'service template': 'service_templates', 'template': 'templates',
- 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
- }
-
-
-class ManageIQ(object):
- """
- class encapsulating ManageIQ API client.
- """
-
- def __init__(self, module):
- # handle import errors
- check_client(module)
-
- params = validate_connection_params(module)
-
- url = params['url']
- username = params['username']
- password = params['password']
- token = params['token']
- verify_ssl = params['validate_certs']
- ca_bundle_path = params['ca_cert']
-
- self._module = module
- self._api_url = url + '/api'
- self._auth = dict(user=username, password=password, token=token)
- try:
- self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
- except Exception as e:
- self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))
-
- @property
- def module(self):
- """ Ansible module module
-
- Returns:
- the ansible module
- """
- return self._module
-
- @property
- def api_url(self):
- """ Base ManageIQ API
-
- Returns:
- the base ManageIQ API
- """
- return self._api_url
-
- @property
- def client(self):
- """ ManageIQ client
-
- Returns:
- the ManageIQ client
- """
- return self._client
-
- def find_collection_resource_by(self, collection_name, **params):
- """ Searches the collection resource by the collection name and the param passed.
-
- Returns:
- the resource as an object if it exists in manageiq, None otherwise.
- """
- try:
- entity = self.client.collections.__getattribute__(collection_name).get(**params)
- except ValueError:
- return None
- except Exception as e:
- self.module.fail_json(msg="failed to find resource {error}".format(error=e))
- return vars(entity)
-
- def find_collection_resource_or_fail(self, collection_name, **params):
- """ Searches the collection resource by the collection name and the param passed.
-
- Returns:
- the resource as an object if it exists in manageiq, Fail otherwise.
- """
- resource = self.find_collection_resource_by(collection_name, **params)
- if resource:
- return resource
- else:
- msg = "{collection_name} where {params} does not exist in manageiq".format(
- collection_name=collection_name, params=str(params))
- self.module.fail_json(msg=msg)
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/base.py b/ansible_collections/community/general/plugins/module_utils/mh/base.py
deleted file mode 100644
index 90c228b3..00000000
--- a/ansible_collections/community/general/plugins/module_utils/mh/base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright: (c) 2020, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE
-from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
-
-
-class ModuleHelperBase(object):
- module = None
- ModuleHelperException = _MHE
-
- def __init__(self, module=None):
- self._changed = False
-
- if module:
- self.module = module
-
- if not isinstance(self.module, AnsibleModule):
- self.module = AnsibleModule(**self.module)
-
- def __init_module__(self):
- pass
-
- def __run__(self):
- raise NotImplementedError()
-
- def __quit_module__(self):
- pass
-
- def __changed__(self):
- raise NotImplementedError()
-
- @property
- def changed(self):
- try:
- return self.__changed__()
- except NotImplementedError:
- return self._changed
-
- @changed.setter
- def changed(self, value):
- self._changed = value
-
- def has_changed(self):
- raise NotImplementedError()
-
- @property
- def output(self):
- raise NotImplementedError()
-
- @module_fails_on_exception
- def run(self):
- self.__init_module__()
- self.__run__()
- self.__quit_module__()
- output = self.output
- if 'failed' not in output:
- output['failed'] = False
- self.module.exit_json(changed=self.has_changed(), **output)
-
- @classmethod
- def execute(cls, module=None):
- cls(module).run()
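# A minimal concrete subclass, assuming __run__, has_changed and output are all
# a trivial module needs; real modules go through ModuleHelper, which fills
# these hooks in. The argument spec here is purely illustrative.
class EchoHelper(ModuleHelperBase):
    # `module` may be a plain dict spec; the base class wraps it in AnsibleModule.
    module = dict(argument_spec=dict(msg=dict(type='str', default='hi')))

    def __run__(self):
        self._msg = self.module.params['msg']

    def has_changed(self):
        return self.changed

    @property
    def output(self):
        return dict(echoed=self._msg)

# EchoHelper.execute() runs the module and calls exit_json(changed=False, echoed=...).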
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py b/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py
deleted file mode 100644
index 558dcca0..00000000
--- a/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright: (c) 2020, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class ModuleHelperException(Exception):
- @staticmethod
- def _get_remove(key, kwargs):
- if key in kwargs:
- result = kwargs[key]
- del kwargs[key]
- return result
- return None
-
- def __init__(self, *args, **kwargs):
- self.msg = self._get_remove('msg', kwargs) or "Module failed with exception: {0}".format(self)
- self.update_output = self._get_remove('update_output', kwargs) or {}
- super(ModuleHelperException, self).__init__(*args)
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
deleted file mode 100644
index 1c6c9ae4..00000000
--- a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright: (c) 2020, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
-from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
-
-
-class DependencyCtxMgr(object):
- def __init__(self, name, msg=None):
- self.name = name
- self.msg = msg
- self.has_it = False
- self.exc_type = None
- self.exc_val = None
- self.exc_tb = None
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.has_it = exc_type is None
- self.exc_type = exc_type
- self.exc_val = exc_val
- self.exc_tb = exc_tb
- return not self.has_it
-
- @property
- def text(self):
- return self.msg or str(self.exc_val)
-
-
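# DependencyCtxMgr.__exit__ returns True when the guarded block raised, so a
# failed import is swallowed and recorded instead of propagating. The intended
# pattern (PyYAML is just an example dependency):
with DependencyCtxMgr('yaml', 'PyYAML is required for this module') as dep:
    import yaml

if not dep.has_it:
    print(dep.text)  # the msg given above, or str() of the ImportError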
-class DependencyMixin(ModuleHelperBase):
- _dependencies = []
-
- @classmethod
- def dependency(cls, name, msg):
- cls._dependencies.append(DependencyCtxMgr(name, msg))
- return cls._dependencies[-1]
-
- def fail_on_missing_deps(self):
- for d in self._dependencies:
- if not d.has_it:
- self.module.fail_json(changed=False,
- exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
- msg=d.text,
- **self.output)
-
- @module_fails_on_exception
- def run(self):
- self.fail_on_missing_deps()
- super(DependencyMixin, self).run()
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
deleted file mode 100644
index 65842fd7..00000000
--- a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright: (c) 2020, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils.common.dict_transformations import dict_merge
-
-from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin, VarDict as _VD
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
-
-
-class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
- _output_conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
- facts_name = None
- output_params = ()
- diff_params = ()
- change_params = ()
- facts_params = ()
-
- VarDict = _VD # for backward compatibility, will be deprecated at some point
-
- def __init__(self, module=None):
- super(ModuleHelper, self).__init__(module)
- for name, value in self.module.params.items():
- self.vars.set(
- name, value,
- diff=name in self.diff_params,
- output=name in self.output_params,
- change=None if not self.change_params else name in self.change_params,
- fact=name in self.facts_params,
- )
-
- self._deprecate_attr(
- attr="VarDict",
- msg="ModuleHelper.VarDict attribute is deprecated, use VarDict from "
- "the ansible_collections.community.general.plugins.module_utils.mh.mixins.vars module instead",
- version="6.0.0",
- collection_name="community.general",
- target=ModuleHelper,
- module=self.module)
-
- def update_output(self, **kwargs):
- self.update_vars(meta={"output": True}, **kwargs)
-
- def update_facts(self, **kwargs):
- self.update_vars(meta={"fact": True}, **kwargs)
-
- def _vars_changed(self):
- return any(self.vars.has_changed(v) for v in self.vars.change_vars())
-
- def has_changed(self):
- return self.changed or self._vars_changed()
-
- @property
- def output(self):
- result = dict(self.vars.output())
- if self.facts_name:
- facts = self.vars.facts()
- if facts is not None:
- result['ansible_facts'] = {self.facts_name: facts}
- if self.module._diff:
- diff = result.get('diff', {})
- vars_diff = self.vars.diff() or {}
- result['diff'] = dict_merge(dict(diff), vars_diff)
-
- for varname in list(result):
- if varname in self._output_conflict_list:
- result["_" + varname] = result[varname]
- del result[varname]
- return result
-
-
-class StateModuleHelper(StateMixin, ModuleHelper):
- pass
-
-
-class CmdModuleHelper(CmdMixin, ModuleHelper):
- pass
-
-
-class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
- pass
diff --git a/ansible_collections/community/general/plugins/module_utils/module_helper.py b/ansible_collections/community/general/plugins/module_utils/module_helper.py
deleted file mode 100644
index a6b35bdd..00000000
--- a/ansible_collections/community/general/plugins/module_utils/module_helper.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-# (c) 2020, Alexei Znamensky
-# Copyright: (c) 2020, Ansible Project
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-from ansible_collections.community.general.plugins.module_utils.mh.module_helper import (
- ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
-)
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr
-from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
-from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception
-from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict
diff --git a/ansible_collections/community/general/plugins/module_utils/oneview.py b/ansible_collections/community/general/plugins/module_utils/oneview.py
deleted file mode 100644
index 6d786b0b..00000000
--- a/ansible_collections/community/general/plugins/module_utils/oneview.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import abc
-import collections
-import json
-import os
-import traceback
-
-HPE_ONEVIEW_IMP_ERR = None
-try:
- from hpOneView.oneview_client import OneViewClient
- HAS_HPE_ONEVIEW = True
-except ImportError:
- HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
- HAS_HPE_ONEVIEW = False
-
-from ansible.module_utils import six
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common._collections_compat import Mapping
-
-
-def transform_list_to_dict(list_):
- """
- Transforms a list into a dictionary, putting values as keys.
-
- :arg list list_: List of values
- :return: dict: dictionary built
- """
-
- ret = {}
-
- if not list_:
- return ret
-
- for value in list_:
- if isinstance(value, Mapping):
- ret.update(value)
- else:
- ret[to_native(value, errors='surrogate_or_strict')] = True
-
- return ret
-
-
-def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
- """
- Merge two lists by the key. It basically:
-
- 1. Adds the items that are present on updated_list and are absent on original_list.
-
- 2. Removes items that are absent on updated_list and are present on original_list.
-
-    3. For all items that are in both lists, overwrites the values of the original item with those of the updated item.
-
- :arg list original_list: original list.
- :arg list updated_list: list with changes.
- :arg str key: unique identifier.
-    :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
-        if their values are null.
- :return: list: Lists merged.
- """
- ignore_when_null = [] if ignore_when_null is None else ignore_when_null
-
- if not original_list:
- return updated_list
-
- items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
-
- merged_items = collections.OrderedDict()
-
- for item in updated_list:
- item_key = item[key]
- if item_key in items_map:
- for ignored_key in ignore_when_null:
- if ignored_key in item and item[ignored_key] is None:
- item.pop(ignored_key)
- merged_items[item_key] = items_map[item_key]
- merged_items[item_key].update(item)
- else:
- merged_items[item_key] = item
-
- return list(merged_items.values())
-
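A small worked example may make the merge semantics above concrete (hypothetical data; assumes merge_list_by_key as defined in this file):

original = [{'id': 1, 'a': 10}, {'id': 2, 'a': 20}]
updated = [{'id': 2, 'a': 99, 'b': None}, {'id': 3, 'a': 30}]

merged = merge_list_by_key(original, updated, key='id', ignore_when_null=['b'])
# item 1 is removed (absent from updated), item 2 starts from the original
# entry and is overwritten by the updated one (the null 'b' is ignored),
# and item 3 is added:
assert merged == [{'id': 2, 'a': 99}, {'id': 3, 'a': 30}]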
-
-def _str_sorted(obj):
- if isinstance(obj, Mapping):
- return json.dumps(obj, sort_keys=True)
- else:
- return str(obj)
-
-
-def _standardize_value(value):
- """
- Convert value to string to enhance the comparison.
-
- :arg value: Any object type.
-
- :return: str: Converted value.
- """
- if isinstance(value, float) and value.is_integer():
- # Workaround to avoid erroneous comparison between int and float
- # Removes zero from integer floats
- value = int(value)
-
- return str(value)
-
-
-class OneViewModuleException(Exception):
- """
- OneView base Exception.
-
- Attributes:
- msg (str): Exception message.
- oneview_response (dict): OneView rest response.
- """
-
- def __init__(self, data):
- self.msg = None
- self.oneview_response = None
-
- if isinstance(data, six.string_types):
- self.msg = data
- else:
- self.oneview_response = data
-
- if data and isinstance(data, dict):
- self.msg = data.get('message')
-
- if self.oneview_response:
- Exception.__init__(self, self.msg, self.oneview_response)
- else:
- Exception.__init__(self, self.msg)
-
-
-class OneViewModuleTaskError(OneViewModuleException):
- """
- OneView Task Error Exception.
-
- Attributes:
- msg (str): Exception message.
- error_code (str): A code which uniquely identifies the specific error.
- """
-
- def __init__(self, msg, error_code=None):
- super(OneViewModuleTaskError, self).__init__(msg)
- self.error_code = error_code
-
-
-class OneViewModuleValueError(OneViewModuleException):
- """
- OneView Value Error.
- The exception is raised when the data contains an inappropriate value.
-
- Attributes:
- msg (str): Exception message.
- """
- pass
-
-
-class OneViewModuleResourceNotFound(OneViewModuleException):
- """
- OneView Resource Not Found Exception.
- The exception is raised when an associated resource was not found.
-
- Attributes:
- msg (str): Exception message.
- """
- pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class OneViewModuleBase(object):
- MSG_CREATED = 'Resource created successfully.'
- MSG_UPDATED = 'Resource updated successfully.'
- MSG_DELETED = 'Resource deleted successfully.'
- MSG_ALREADY_PRESENT = 'Resource is already present.'
- MSG_ALREADY_ABSENT = 'Resource is already absent.'
- MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
-
- ONEVIEW_COMMON_ARGS = dict(
- config=dict(type='path'),
- hostname=dict(type='str'),
- username=dict(type='str'),
- password=dict(type='str', no_log=True),
- api_version=dict(type='int'),
- image_streamer_hostname=dict(type='str')
- )
-
- ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
-
- resource_client = None
-
- def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False):
- """
- OneViewModuleBase constructor.
-
- :arg dict additional_arg_spec: Additional argument spec definition.
-        :arg bool validate_etag_support: Enables support for eTag validation.
- """
- argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
-
- self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode)
-
- self._check_hpe_oneview_sdk()
- self._create_oneview_client()
-
- self.state = self.module.params.get('state')
- self.data = self.module.params.get('data')
-
- # Preload params for get_all - used by facts
- self.facts_params = self.module.params.get('params') or {}
-
- # Preload options as dict - used by facts
- self.options = transform_list_to_dict(self.module.params.get('options'))
-
- self.validate_etag_support = validate_etag_support
-
- def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
-
- merged_arg_spec = dict()
- merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
-
- if validate_etag_support:
- merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
-
- if additional_arg_spec:
- merged_arg_spec.update(additional_arg_spec)
-
- return merged_arg_spec
-
- def _check_hpe_oneview_sdk(self):
- if not HAS_HPE_ONEVIEW:
- self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
-
- def _create_oneview_client(self):
- if self.module.params.get('hostname'):
- config = dict(ip=self.module.params['hostname'],
- credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
- api_version=self.module.params['api_version'],
- image_streamer_ip=self.module.params['image_streamer_hostname'])
- self.oneview_client = OneViewClient(config)
- elif not self.module.params['config']:
- self.oneview_client = OneViewClient.from_environment_variables()
- else:
- self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
-
- @abc.abstractmethod
- def execute_module(self):
- """
- Abstract method, must be implemented by the inheritor.
-
-        This method is called from the run method. It should contain the module logic.
-
- :return: dict: It must return a dictionary with the attributes for the module result,
- such as ansible_facts, msg and changed.
- """
- pass
-
- def run(self):
- """
- Common implementation of the OneView run modules.
-
-        It calls the inheritor's 'execute_module' function and sends the return value to Ansible.
-
- It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
-
- """
- try:
- if self.validate_etag_support:
- if not self.module.params.get('validate_etag'):
- self.oneview_client.connection.disable_etag_validation()
-
- result = self.execute_module()
-
- if "changed" not in result:
- result['changed'] = False
-
- self.module.exit_json(**result)
-
- except OneViewModuleException as exception:
- error_msg = '; '.join(to_native(e) for e in exception.args)
- self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
-
- def resource_absent(self, resource, method='delete'):
- """
- Generic implementation of the absent state for the OneView resources.
-
- It checks if the resource needs to be removed.
-
- :arg dict resource: Resource to delete.
- :arg str method: Function of the OneView client that will be called for resource deletion.
- Usually delete or remove.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
- if resource:
- getattr(self.resource_client, method)(resource)
-
- return {"changed": True, "msg": self.MSG_DELETED}
- else:
- return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
-
- def get_by_name(self, name):
- """
- Generic get by name implementation.
-
- :arg str name: Resource name to search for.
-
- :return: The resource found or None.
- """
- result = self.resource_client.get_by('name', name)
- return result[0] if result else None
-
- def resource_present(self, resource, fact_name, create_method='create'):
- """
- Generic implementation of the present state for the OneView resources.
-
- It checks if the resource needs to be created or updated.
-
- :arg dict resource: Resource to create or update.
-        :arg str fact_name: Name of the fact returned to Ansible.
- :arg str create_method: Function of the OneView client that will be called for resource creation.
- Usually create or add.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
-
- changed = False
- if "newName" in self.data:
- self.data["name"] = self.data.pop("newName")
-
- if not resource:
- resource = getattr(self.resource_client, create_method)(self.data)
- msg = self.MSG_CREATED
- changed = True
-
- else:
- merged_data = resource.copy()
- merged_data.update(self.data)
-
- if self.compare(resource, merged_data):
- msg = self.MSG_ALREADY_PRESENT
- else:
- resource = self.resource_client.update(merged_data)
- changed = True
- msg = self.MSG_UPDATED
-
- return dict(
- msg=msg,
- changed=changed,
- ansible_facts={fact_name: resource}
- )
-
- def resource_scopes_set(self, state, fact_name, scope_uris):
- """
- Generic implementation of the scopes update PATCH for the OneView resources.
- It checks if the resource needs to be updated with the current scopes.
- This method is meant to be run after ensuring the present state.
- :arg dict state: Dict containing the data from the last state results in the resource.
- It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
-        :arg str fact_name: Name of the fact returned to Ansible.
- :arg list scope_uris: List with all the scope URIs to be added to the resource.
- :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
- """
- if scope_uris is None:
- scope_uris = []
- resource = state['ansible_facts'][fact_name]
- operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
-
- if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
- state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
- state['changed'] = True
- state['msg'] = self.MSG_UPDATED
-
- return state
-
- def compare(self, first_resource, second_resource):
- """
-        Recursively compares two dictionaries for equivalence, ignoring types and element order.
-        Particularities of the comparison:
-            - A missing key is treated as equivalent to None.
-            - These values are considered equal: None, empty, False.
-            - Lists of the same size are compared value by value after sorting.
-            - Each element is converted to str before the comparison.
- :arg dict first_resource: first dictionary
- :arg dict second_resource: second dictionary
- :return: bool: True when equal, False when different.
- """
- resource1 = first_resource
- resource2 = second_resource
-
- debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
-
- # The first resource is True / Not Null and the second resource is False / Null
- if resource1 and not resource2:
- self.module.log("resource1 and not resource2. " + debug_resources)
- return False
-
- # Checks all keys in first dict against the second dict
- for key in resource1:
- if key not in resource2:
- if resource1[key] is not None:
-                    # A missing key is equivalent to existing with the value None
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- # If both values are null, empty or False it will be considered equal.
- elif not resource1[key] and not resource2[key]:
- continue
- elif isinstance(resource1[key], Mapping):
- # recursive call
- if not self.compare(resource1[key], resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- elif isinstance(resource1[key], list):
- # change comparison function to compare_list
- if not self.compare_list(resource1[key], resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
- elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
-
- # Checks all keys in the second dict, looking for missing elements
- for key in resource2.keys():
- if key not in resource1:
- if resource2[key] is not None:
-                    # A missing key is equivalent to existing with the value None
- self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
- return False
-
- return True
-
- def compare_list(self, first_resource, second_resource):
- """
-        Recursively compares two lists for equivalence, ignoring types and element order.
-        Lists of the same size are compared value by value after sorting;
-        each element is converted to str before the comparison.
- :arg list first_resource: first list
- :arg list second_resource: second list
- :return: True when equal; False when different.
- """
-
- resource1 = first_resource
- resource2 = second_resource
-
- debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
-
- # The second list is null / empty / False
- if not resource2:
- self.module.log("resource 2 is null. " + debug_resources)
- return False
-
- if len(resource1) != len(resource2):
- self.module.log("resources have different length. " + debug_resources)
- return False
-
- resource1 = sorted(resource1, key=_str_sorted)
- resource2 = sorted(resource2, key=_str_sorted)
-
- for i, val in enumerate(resource1):
- if isinstance(val, Mapping):
- # change comparison function to compare dictionaries
- if not self.compare(val, resource2[i]):
- self.module.log("resources are different. " + debug_resources)
- return False
- elif isinstance(val, list):
- # recursive call
- if not self.compare_list(val, resource2[i]):
- self.module.log("lists are different. " + debug_resources)
- return False
- elif _standardize_value(val) != _standardize_value(resource2[i]):
- self.module.log("values are different. " + debug_resources)
- return False
-
- # no differences found
- return True
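To make the comparison rules above concrete, a brief illustration (assumes the helpers in this file; `mod` stands for a hypothetical OneViewModuleBase subclass instance):

# Integer-valued floats and ints compare equal after _standardize_value():
assert _standardize_value(10.0) == _standardize_value(10) == '10'

# A missing key, None, '' and False are treated as equivalent, and lists
# are compared order-insensitively:
# mod.compare({'a': None, 'b': [2, 1]}, {'b': [1, 2]})  -> True
# mod.compare({'a': 'x'}, {'a': 'y'})                   -> False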
diff --git a/ansible_collections/community/general/plugins/module_utils/online.py b/ansible_collections/community/general/plugins/module_utils/online.py
deleted file mode 100644
index b5acbcc0..00000000
--- a/ansible_collections/community/general/plugins/module_utils/online.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# -*- coding: utf-8 -*-
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import sys
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url
-
-
-def online_argument_spec():
- return dict(
- api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
- no_log=True, aliases=['oauth_token']),
- api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
- api_timeout=dict(type='int', default=30, aliases=['timeout']),
- validate_certs=dict(default=True, type='bool'),
- )
-
-
-class OnlineException(Exception):
-
- def __init__(self, message):
- self.message = message
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(self.info["body"])
- return None
- try:
- return json.loads(self.body)
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
- @property
- def ok(self):
- return self.status_code in (200, 201, 202, 204)
-
-
-class Online(object):
-
- def __init__(self, module):
- self.module = module
- self.headers = {
- 'Authorization': "Bearer %s" % self.module.params.get('api_token'),
- 'User-Agent': self.get_user_agent_string(module),
- 'Content-type': 'application/json',
- }
- self.name = None
-
- def get_resources(self):
- results = self.get('/%s' % self.name)
- if not results.ok:
- raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
- self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
- results.status_code, results.json['message']
- ))
-
- return results.json
-
- def _url_builder(self, path):
- if path[0] == '/':
- path = path[1:]
- return '%s/%s' % (self.module.params.get('api_url'), path)
-
- def send(self, method, path, data=None, headers=None):
- url = self._url_builder(path)
- data = self.module.jsonify(data)
-
- if headers is not None:
- self.headers.update(headers)
-
- resp, info = fetch_url(
- self.module, url, data=data, headers=self.headers, method=method,
- timeout=self.module.params.get('api_timeout')
- )
-
-        # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
- if info['status'] == -1:
- self.module.fail_json(msg=info['msg'])
-
- return Response(resp, info)
-
- @staticmethod
- def get_user_agent_string(module):
- return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])
-
- def get(self, path, data=None, headers=None):
- return self.send('GET', path, data, headers)
-
- def put(self, path, data=None, headers=None):
- return self.send('PUT', path, data, headers)
-
- def post(self, path, data=None, headers=None):
- return self.send('POST', path, data, headers)
-
- def delete(self, path, data=None, headers=None):
- return self.send('DELETE', path, data, headers)
-
- def patch(self, path, data=None, headers=None):
- return self.send("PATCH", path, data, headers)
-
- def update(self, path, data=None, headers=None):
- return self.send("UPDATE", path, data, headers)
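For orientation, a usage sketch of the Online wrapper above (the module object and resource name are hypothetical):

# module = AnsibleModule(argument_spec=online_argument_spec())
# api = Online(module)
# api.name = 'server'             # resource collection under the API root
# servers = api.get_resources()   # GET {api_url}/server; raises OnlineException on error
# resp = api.get('/server/1234')  # Response wrapper exposing .ok, .status_code, .json
# if not resp.ok:
#     module.fail_json(msg='lookup failed')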
diff --git a/ansible_collections/community/general/plugins/module_utils/opennebula.py b/ansible_collections/community/general/plugins/module_utils/opennebula.py
deleted file mode 100644
index c896a9c6..00000000
--- a/ansible_collections/community/general/plugins/module_utils/opennebula.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2018 www.privaz.io Valletech AB
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import time
-import ssl
-from os import environ
-from ansible.module_utils.six import string_types
-from ansible.module_utils.basic import AnsibleModule
-
-
-HAS_PYONE = True
-
-try:
- from pyone import OneException
- from pyone.server import OneServer
-except ImportError:
- OneException = Exception
- HAS_PYONE = False
-
-
-class OpenNebulaModule:
- """
- Base class for all OpenNebula Ansible Modules.
- This is basically a wrapper of the common arguments, the pyone client and
- some utility methods.
- """
-
- common_args = dict(
- api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
- api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
- api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
- validate_certs=dict(default=True, type='bool'),
- wait_timeout=dict(type='int', default=300),
- )
-
- def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
-
- module_args = OpenNebulaModule.common_args.copy()
- module_args.update(argument_spec)
-
- self.module = AnsibleModule(argument_spec=module_args,
- supports_check_mode=supports_check_mode,
- mutually_exclusive=mutually_exclusive,
- required_one_of=required_one_of,
- required_if=required_if)
- self.result = dict(changed=False,
- original_message='',
- message='')
- self.one = self.create_one_client()
-
- self.resolved_parameters = self.resolve_parameters()
-
- def create_one_client(self):
- """
- Creates an XMLPRC client to OpenNebula.
-
- Returns: the new xmlrpc client.
-
- """
-
- # context required for not validating SSL, old python versions won't validate anyway.
- if hasattr(ssl, '_create_unverified_context'):
- no_ssl_validation_context = ssl._create_unverified_context()
- else:
- no_ssl_validation_context = None
-
- # Check if the module can run
- if not HAS_PYONE:
- self.fail("pyone is required for this module")
-
- if self.module.params.get("api_url"):
- url = self.module.params.get("api_url")
- else:
- self.fail("Either api_url or the environment variable ONE_URL must be provided")
-
- if self.module.params.get("api_username"):
- username = self.module.params.get("api_username")
- else:
-            self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")
-
- if self.module.params.get("api_password"):
- password = self.module.params.get("api_password")
- else:
-            self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")
-
- session = "%s:%s" % (username, password)
-
- if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
- return OneServer(url, session=session, context=no_ssl_validation_context)
- else:
- return OneServer(url, session)
-
- def close_one_client(self):
- """
- Close the pyone session.
- """
- self.one.server_close()
-
- def fail(self, msg):
- """
- Utility failure method, will ensure pyone is properly closed before failing.
- Args:
- msg: human readable failure reason.
- """
- if hasattr(self, 'one'):
- self.close_one_client()
- self.module.fail_json(msg=msg)
-
- def exit(self):
- """
- Utility exit method, will ensure pyone is properly closed before exiting.
-
- """
- if hasattr(self, 'one'):
- self.close_one_client()
- self.module.exit_json(**self.result)
-
- def resolve_parameters(self):
- """
-        This method resolves parameters provided as a secondary identifier (usually a name) to the primary ID.
-        For example, if cluster_name is present, cluster_id will be introduced by performing
-        the required resolution.
-
- Returns: a copy of the parameters that includes the resolved parameters.
-
- """
-
- resolved_params = dict(self.module.params)
-
- if 'cluster_name' in self.module.params:
- clusters = self.one.clusterpool.info()
- for cluster in clusters.CLUSTER:
- if cluster.NAME == self.module.params.get('cluster_name'):
- resolved_params['cluster_id'] = cluster.ID
-
- return resolved_params
-
- def is_parameter(self, name):
- """
- Utility method to check if a parameter was provided or is resolved
- Args:
- name: the parameter to check
- """
- if name in self.resolved_parameters:
- return self.get_parameter(name) is not None
- else:
- return False
-
- def get_parameter(self, name):
- """
- Utility method for accessing parameters that includes resolved ID
- parameters from provided Name parameters.
- """
- return self.resolved_parameters.get(name)
-
- def get_host_by_name(self, name):
- '''
- Returns a host given its name.
- Args:
- name: the name of the host
-
- Returns: the host object or None if the host is absent.
-
- '''
- hosts = self.one.hostpool.info()
- for h in hosts.HOST:
- if h.NAME == name:
- return h
- return None
-
- def get_cluster_by_name(self, name):
- """
- Returns a cluster given its name.
- Args:
- name: the name of the cluster
-
-        Returns: the cluster object or None if the cluster is absent.
- """
-
- clusters = self.one.clusterpool.info()
- for c in clusters.CLUSTER:
- if c.NAME == name:
- return c
- return None
-
- def get_template_by_name(self, name):
- '''
- Returns a template given its name.
- Args:
- name: the name of the template
-
-        Returns: the template object or None if the template is absent.
-
- '''
- templates = self.one.templatepool.info()
- for t in templates.TEMPLATE:
- if t.NAME == name:
- return t
- return None
-
- def cast_template(self, template):
- """
-        OpenNebula handles all template elements as strings.
-        At some point a cast is performed on the types provided by the user;
-        this function mimics that transformation so that required template updates are detected properly.
-        Additionally, an array will be converted to a comma-separated list,
-        which works for labels and hopefully for more.
-
- Args:
- template: the template to transform
-
- Returns: the transformed template with data casts applied.
- """
-
- # TODO: check formally available data types in templates
- # TODO: some arrays might be converted to space separated
-
- for key in template:
- value = template[key]
- if isinstance(value, dict):
- self.cast_template(template[key])
- elif isinstance(value, list):
- template[key] = ', '.join(value)
- elif not isinstance(value, string_types):
- template[key] = str(value)
-
- def requires_template_update(self, current, desired):
- """
- This function will help decide if a template update is required or not
- If a desired key is missing from the current dictionary an update is required
- If the intersection of both dictionaries is not deep equal, an update is required
- Args:
- current: current template as a dictionary
- desired: desired template as a dictionary
-
- Returns: True if a template update is required
- """
-
- if not desired:
- return False
-
- self.cast_template(desired)
- intersection = dict()
- for dkey in desired.keys():
- if dkey in current.keys():
- intersection[dkey] = current[dkey]
- else:
- return True
- return not (desired == intersection)
-
- def wait_for_state(self, element_name, state, state_name, target_states,
- invalid_states=None, transition_states=None,
- wait_timeout=None):
- """
- Args:
- element_name: the name of the object we are waiting for: HOST, VM, etc.
- state: lambda that returns the current state, will be queried until target state is reached
- state_name: lambda that returns the readable form of a given state
- target_states: states expected to be reached
-            invalid_states: if any of these states is reached, fail
- transition_states: when used, these are the valid states during the transition.
-            wait_timeout: timeout period in seconds. Defaults to the module's wait_timeout parameter.
- """
-
- if not wait_timeout:
- wait_timeout = self.module.params.get("wait_timeout")
-
- start_time = time.time()
-
- while (time.time() - start_time) < wait_timeout:
- current_state = state()
-
-            if invalid_states and current_state in invalid_states:
- self.fail('invalid %s state %s' % (element_name, state_name(current_state)))
-
- if transition_states:
- if current_state not in transition_states:
- self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))
-
- if current_state in target_states:
- return True
-
- time.sleep(self.one.server_retry_interval())
-
- self.fail(msg="Wait timeout has expired!")
-
- def run_module(self):
- """
-        Trigger the start of the execution of the module.
- Returns:
-
- """
- try:
- self.run(self.one, self.module, self.result)
- except OneException as e:
- self.fail(msg="OpenNebula Exception: %s" % e)
-
- def run(self, one, module, result):
- """
- to be implemented by subclass with the actual module actions.
- Args:
- one: the OpenNebula XMLRPC client
- module: the Ansible Module object
- result: the Ansible result
- """
- raise NotImplementedError("Method requires implementation")
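A brief illustration of the casting and update check described above (hypothetical template; `mod` stands for an OpenNebulaModule instance):

# tpl = {'CPU': 1, 'LABELS': ['web', 'prod']}
# mod.cast_template(tpl)
# tpl == {'CPU': '1', 'LABELS': 'web, prod'}
#
# requires_template_update() only inspects the desired keys:
# mod.requires_template_update(current=tpl, desired={'CPU': 2})  -> True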
diff --git a/ansible_collections/community/general/plugins/module_utils/proxmox.py b/ansible_collections/community/general/plugins/module_utils/proxmox.py
deleted file mode 100644
index 94bd0b79..00000000
--- a/ansible_collections/community/general/plugins/module_utils/proxmox.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2020, Tristan Le Guern
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import atexit
-import time
-import re
-import traceback
-
-PROXMOXER_IMP_ERR = None
-try:
- from proxmoxer import ProxmoxAPI
- HAS_PROXMOXER = True
-except ImportError:
- HAS_PROXMOXER = False
- PROXMOXER_IMP_ERR = traceback.format_exc()
-
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-
-def proxmox_auth_argument_spec():
- return dict(
- api_host=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_HOST'])
- ),
- api_user=dict(type='str',
- required=True,
- fallback=(env_fallback, ['PROXMOX_USER'])
- ),
- api_password=dict(type='str',
- no_log=True,
- fallback=(env_fallback, ['PROXMOX_PASSWORD'])
- ),
- api_token_id=dict(type='str',
- no_log=False
- ),
- api_token_secret=dict(type='str',
- no_log=True
- ),
- validate_certs=dict(type='bool',
- default=False
- ),
- )
-
-
-def proxmox_to_ansible_bool(value):
- '''Convert Proxmox representation of a boolean to be ansible-friendly'''
- return True if value == 1 else False
-
-
-def ansible_to_proxmox_bool(value):
- '''Convert Ansible representation of a boolean to be proxmox-friendly'''
- if value is None:
- return None
-
- if not isinstance(value, bool):
- raise ValueError("%s must be of type bool not %s" % (value, type(value)))
-
- return 1 if value else 0
-
-
-class ProxmoxAnsible(object):
- """Base class for Proxmox modules"""
- def __init__(self, module):
- if not HAS_PROXMOXER:
- module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
-
- self.module = module
- self.proxmox_api = self._connect()
- # Test token validity
- try:
- self.proxmox_api.version.get()
- except Exception as e:
- module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def _connect(self):
- api_host = self.module.params['api_host']
- api_user = self.module.params['api_user']
- api_password = self.module.params['api_password']
- api_token_id = self.module.params['api_token_id']
- api_token_secret = self.module.params['api_token_secret']
- validate_certs = self.module.params['validate_certs']
-
- auth_args = {'user': api_user}
- if api_password:
- auth_args['password'] = api_password
- else:
- auth_args['token_name'] = api_token_id
- auth_args['token_value'] = api_token_secret
-
- try:
- return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
- except Exception as e:
- self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
-
- def version(self):
- apireturn = self.proxmox_api.version.get()
- return LooseVersion(apireturn['version'])
-
- def get_node(self, node):
- nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
- return nodes[0] if nodes else None
-
- def get_nextvmid(self):
- vmid = self.proxmox_api.cluster.nextid.get()
- return vmid
-
- def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
- vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
-
- if not vms:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='No VM with name %s found' % name)
- elif len(vms) > 1:
- if choose_first_if_multiple:
- self.module.deprecate(
- 'Multiple VMs with name %s found, choosing the first one. ' % name +
- 'This will be an error in the future. To ensure the correct VM is used, ' +
- 'also pass the vmid parameter.',
- version='5.0.0', collection_name='community.general')
- else:
- self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name)
-
- return vms[0]
-
- def get_vm(self, vmid, ignore_missing=False):
- vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
-
- if vms:
- return vms[0]
- else:
- if ignore_missing:
- return None
-
- self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
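For reference, a sketch of the two authentication paths _connect() above chooses between (host and credentials are placeholders):

# password auth:
#   ProxmoxAPI('pve.example.com', user='root@pam',
#              password='secret', verify_ssl=False)
# API-token auth (api_token_id / api_token_secret):
#   ProxmoxAPI('pve.example.com', user='root@pam', token_name='ansible',
#              token_value='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
#              verify_ssl=False)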
diff --git a/ansible_collections/community/general/plugins/module_utils/rax.py b/ansible_collections/community/general/plugins/module_utils/rax.py
deleted file mode 100644
index 84effee9..00000000
--- a/ansible_collections/community/general/plugins/module_utils/rax.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (c), Michael DeHaan , 2012-2013
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-import os
-import re
-from uuid import UUID
-
-from ansible.module_utils.six import text_type, binary_type
-
-FINAL_STATUSES = ('ACTIVE', 'ERROR')
-VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
- 'error', 'error_deleting')
-
-CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
- 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
-CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
- 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
- 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
-
-NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
-PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
-SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
-
-
-def rax_slugify(value):
- """Prepend a key with rax_ and normalize the key name"""
- return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
-
-
-def rax_clb_node_to_dict(obj):
- """Function to convert a CLB Node object to a dict"""
- if not obj:
- return {}
- node = obj.to_dict()
- node['id'] = obj.id
- node['weight'] = obj.weight
- return node
-
-
-def rax_to_dict(obj, obj_type='standard'):
- """Generic function to convert a pyrax object to a dict
-
- obj_type values:
- standard
- clb
- server
-
- """
- instance = {}
- for key in dir(obj):
- value = getattr(obj, key)
- if obj_type == 'clb' and key == 'nodes':
- instance[key] = []
- for node in value:
- instance[key].append(rax_clb_node_to_dict(node))
- elif (isinstance(value, list) and len(value) > 0 and
- not isinstance(value[0], NON_CALLABLES)):
- instance[key] = []
- for item in value:
- instance[key].append(rax_to_dict(item))
- elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
- if obj_type == 'server':
- if key == 'image':
- if not value:
- instance['rax_boot_source'] = 'volume'
- else:
- instance['rax_boot_source'] = 'local'
- key = rax_slugify(key)
- instance[key] = value
-
- if obj_type == 'server':
- for attr in ['id', 'accessIPv4', 'name', 'status']:
- instance[attr] = instance.get(rax_slugify(attr))
-
- return instance
-
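A quick illustration of the slugify behavior above (attribute name taken from the server handling in rax_to_dict):

# rax_slugify('accessIPv4')  -> 'rax_accessipv4'
# rax_to_dict(obj, 'server') also copies the slugged value back to the
# plain 'accessIPv4' key for convenience.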
-
-def rax_find_bootable_volume(module, rax_module, server, exit=True):
-    """Find a server's bootable volume"""
- cs = rax_module.cloudservers
- cbs = rax_module.cloud_blockstorage
- server_id = rax_module.utils.get_id(server)
- volumes = cs.volumes.get_server_volumes(server_id)
- bootable_volumes = []
- for volume in volumes:
- vol = cbs.get(volume)
- if module.boolean(vol.bootable):
- bootable_volumes.append(vol)
- if not bootable_volumes:
- if exit:
- module.fail_json(msg='No bootable volumes could be found for '
- 'server %s' % server_id)
- else:
- return False
- elif len(bootable_volumes) > 1:
- if exit:
- module.fail_json(msg='Multiple bootable volumes found for server '
- '%s' % server_id)
- else:
- return False
-
- return bootable_volumes[0]
-
-
-def rax_find_image(module, rax_module, image, exit=True):
- """Find a server image by ID or Name"""
- cs = rax_module.cloudservers
- try:
- UUID(image)
- except ValueError:
- try:
- image = cs.images.find(human_id=image)
- except(cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- try:
- image = cs.images.find(name=image)
- except (cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- if exit:
- module.fail_json(msg='No matching image found (%s)' %
- image)
- else:
- return False
-
- return rax_module.utils.get_id(image)
-
-
-def rax_find_volume(module, rax_module, name):
- """Find a Block storage volume by ID or name"""
- cbs = rax_module.cloud_blockstorage
- try:
- UUID(name)
- volume = cbs.get(name)
- except ValueError:
- try:
- volume = cbs.find(name=name)
- except rax_module.exc.NotFound:
- volume = None
- except Exception as e:
- module.fail_json(msg='%s' % e)
- return volume
-
-
-def rax_find_network(module, rax_module, network):
- """Find a cloud network by ID or name"""
- cnw = rax_module.cloud_networks
- try:
- UUID(network)
- except ValueError:
- if network.lower() == 'public':
- return cnw.get_server_networks(PUBLIC_NET_ID)
- elif network.lower() == 'private':
- return cnw.get_server_networks(SERVICE_NET_ID)
- else:
- try:
- network_obj = cnw.find_network_by_label(network)
- except (rax_module.exceptions.NetworkNotFound,
- rax_module.exceptions.NetworkLabelNotUnique):
- module.fail_json(msg='No matching network found (%s)' %
- network)
- else:
- return cnw.get_server_networks(network_obj)
- else:
- return cnw.get_server_networks(network)
-
-
-def rax_find_server(module, rax_module, server):
- """Find a Cloud Server by ID or name"""
- cs = rax_module.cloudservers
- try:
- UUID(server)
- server = cs.servers.get(server)
- except ValueError:
- servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
- if not servers:
- module.fail_json(msg='No Server was matched by name, '
- 'try using the Server ID instead')
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers matched by name, '
- 'try using the Server ID instead')
-
- # We made it this far, grab the first and hopefully only server
- # in the list
- server = servers[0]
- return server
-
-
-def rax_find_loadbalancer(module, rax_module, loadbalancer):
- """Find a Cloud Load Balancer by ID or name"""
- clb = rax_module.cloud_loadbalancers
- try:
- found = clb.get(loadbalancer)
- except Exception:
- found = []
- for lb in clb.list():
- if loadbalancer == lb.name:
- found.append(lb)
-
- if not found:
- module.fail_json(msg='No loadbalancer was matched')
-
- if len(found) > 1:
- module.fail_json(msg='Multiple loadbalancers matched')
-
- # We made it this far, grab the first and hopefully only item
- # in the list
- found = found[0]
-
- return found
-
-
-def rax_argument_spec():
- """Return standard base dictionary used for the argument_spec
- argument in AnsibleModule
-
- """
- return dict(
- api_key=dict(type='str', aliases=['password'], no_log=True),
- auth_endpoint=dict(type='str'),
- credentials=dict(type='path', aliases=['creds_file']),
- env=dict(type='str'),
- identity_type=dict(type='str', default='rackspace'),
- region=dict(type='str'),
- tenant_id=dict(type='str'),
- tenant_name=dict(type='str'),
- username=dict(type='str'),
- validate_certs=dict(type='bool', aliases=['verify_ssl']),
- )
-
-
-def rax_required_together():
- """Return the default list used for the required_together argument to
- AnsibleModule"""
- return [['api_key', 'username']]
-
-
-def setup_rax_module(module, rax_module, region_required=True):
- """Set up pyrax in a standard way for all modules"""
- rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
- rax_module.USER_AGENT)
-
- api_key = module.params.get('api_key')
- auth_endpoint = module.params.get('auth_endpoint')
- credentials = module.params.get('credentials')
- env = module.params.get('env')
- identity_type = module.params.get('identity_type')
- region = module.params.get('region')
- tenant_id = module.params.get('tenant_id')
- tenant_name = module.params.get('tenant_name')
- username = module.params.get('username')
- verify_ssl = module.params.get('validate_certs')
-
- if env is not None:
- rax_module.set_environment(env)
-
- rax_module.set_setting('identity_type', identity_type)
- if verify_ssl is not None:
- rax_module.set_setting('verify_ssl', verify_ssl)
- if auth_endpoint is not None:
- rax_module.set_setting('auth_endpoint', auth_endpoint)
- if tenant_id is not None:
- rax_module.set_setting('tenant_id', tenant_id)
- if tenant_name is not None:
- rax_module.set_setting('tenant_name', tenant_name)
-
- try:
- username = username or os.environ.get('RAX_USERNAME')
- if not username:
- username = rax_module.get_setting('keyring_username')
- if username:
- api_key = 'USE_KEYRING'
- if not api_key:
- api_key = os.environ.get('RAX_API_KEY')
- credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
- os.environ.get('RAX_CREDS_FILE'))
- region = (region or os.environ.get('RAX_REGION') or
- rax_module.get_setting('region'))
- except KeyError as e:
- module.fail_json(msg='Unable to load %s' % e.message)
-
- try:
- if api_key and username:
- if api_key == 'USE_KEYRING':
- rax_module.keyring_auth(username, region=region)
- else:
- rax_module.set_credentials(username, api_key=api_key,
- region=region)
- elif credentials:
- credentials = os.path.expanduser(credentials)
- rax_module.set_credential_file(credentials, region=region)
- else:
- raise Exception('No credentials supplied!')
- except Exception as e:
- if e.message:
- msg = str(e.message)
- else:
- msg = repr(e)
- module.fail_json(msg=msg)
-
- if region_required and region not in rax_module.regions:
- module.fail_json(msg='%s is not a valid region, must be one of: %s' %
- (region, ','.join(rax_module.regions)))
-
- return rax_module
diff --git a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
deleted file mode 100644
index 378d8fa9..00000000
--- a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
+++ /dev/null
@@ -1,2982 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2018 Dell EMC Inc.
-# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-from ansible.module_utils.urls import open_url
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.six.moves import http_client
-from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
-POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
- 'OData-Version': '4.0'}
-PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
- 'OData-Version': '4.0'}
-DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
-
-FAIL_MSG = 'Issuing a data modification command without specifying the '\
- 'ID of the target %(resource)s resource when there is more '\
- 'than one %(resource)s is no longer allowed. Use the `resource_id` '\
- 'option to specify the target %(resource)s ID.'
-
-
-class RedfishUtils(object):
-
- def __init__(self, creds, root_uri, timeout, module, resource_id=None,
- data_modification=False, strip_etag_quotes=False):
- self.root_uri = root_uri
- self.creds = creds
- self.timeout = timeout
- self.module = module
- self.service_root = '/redfish/v1/'
- self.resource_id = resource_id
- self.data_modification = data_modification
- self.strip_etag_quotes = strip_etag_quotes
- self._init_session()
-
- def _auth_params(self, headers):
- """
- Return tuple of required authentication params based on the presence
- of a token in the self.creds dict. If using a token, set the
- X-Auth-Token header in the `headers` param.
-
- :param headers: dict containing headers to send in request
- :return: tuple of username, password and force_basic_auth
- """
- if self.creds.get('token'):
- username = None
- password = None
- force_basic_auth = False
- headers['X-Auth-Token'] = self.creds['token']
- else:
- username = self.creds['user']
- password = self.creds['pswd']
- force_basic_auth = True
- return username, password, force_basic_auth
-
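To summarize the selection logic of _auth_params() above (illustrative creds dicts):

# token present -> X-Auth-Token header, no basic auth:
#   creds = {'token': 'abc123'}                 => (None, None, False)
# no token -> HTTP basic auth with user/pswd:
#   creds = {'user': 'root', 'pswd': 'calvin'}  => ('root', 'calvin', True)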
- # The following functions are to send GET/POST/PATCH/DELETE requests
- def get_request(self, uri):
- req_headers = dict(GET_HEADERS)
- username, password, basic_auth = self._auth_params(req_headers)
- try:
- resp = open_url(uri, method="GET", headers=req_headers,
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- data = json.loads(to_native(resp.read()))
- headers = dict((k.lower(), v) for (k, v) in resp.info().items())
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'data': data, 'headers': headers}
-
- def post_request(self, uri, pyld):
- req_headers = dict(POST_HEADERS)
- username, password, basic_auth = self._auth_params(req_headers)
- try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=req_headers, method="POST",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- headers = dict((k.lower(), v) for (k, v) in resp.info().items())
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'headers': headers, 'resp': resp}
-
- def patch_request(self, uri, pyld):
- req_headers = dict(PATCH_HEADERS)
- r = self.get_request(uri)
- if r['ret']:
- # Get etag from etag header or @odata.etag property
- etag = r['headers'].get('etag')
- if not etag:
- etag = r['data'].get('@odata.etag')
- if etag:
- if self.strip_etag_quotes:
- etag = etag.strip('"')
- req_headers['If-Match'] = etag
- username, password, basic_auth = self._auth_params(req_headers)
- try:
- resp = open_url(uri, data=json.dumps(pyld),
- headers=req_headers, method="PATCH",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on PATCH request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'resp': resp}
-
- def delete_request(self, uri, pyld=None):
- req_headers = dict(DELETE_HEADERS)
- username, password, basic_auth = self._auth_params(req_headers)
- try:
- data = json.dumps(pyld) if pyld else None
- resp = open_url(uri, data=data,
- headers=req_headers, method="DELETE",
- url_username=username, url_password=password,
- force_basic_auth=basic_auth, validate_certs=False,
- follow_redirects='all',
- use_proxy=True, timeout=self.timeout)
- except HTTPError as e:
- msg = self._get_extended_message(e)
- return {'ret': False,
- 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'"
- % (e.code, uri, msg),
- 'status': e.code}
- except URLError as e:
- return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
- % (uri, e.reason)}
- # Almost all errors should be caught above, but just in case
- except Exception as e:
- return {'ret': False,
- 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
- return {'ret': True, 'resp': resp}
-
- @staticmethod
- def _get_extended_message(error):
- """
- Get Redfish ExtendedInfo message from response payload if present
- :param error: an HTTPError exception
- :type error: HTTPError
- :return: the ExtendedInfo message if present, else standard HTTP error
- """
- msg = http_client.responses.get(error.code, '')
- if error.code >= 400:
- try:
- body = error.read().decode('utf-8')
- data = json.loads(body)
- ext_info = data['error']['@Message.ExtendedInfo']
- msg = ext_info[0]['Message']
- except Exception:
- pass
- return msg
-
- def _init_session(self):
- pass
-
- def _find_accountservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'AccountService' not in data:
- return {'ret': False, 'msg': "AccountService resource not found"}
- else:
- account_service = data["AccountService"]["@odata.id"]
- response = self.get_request(self.root_uri + account_service)
- if response['ret'] is False:
- return response
- data = response['data']
- accounts = data['Accounts']['@odata.id']
- if accounts[-1:] == '/':
- accounts = accounts[:-1]
- self.accounts_uri = accounts
- return {'ret': True}
-
- def _find_sessionservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'SessionService' not in data:
- return {'ret': False, 'msg': "SessionService resource not found"}
- else:
- session_service = data["SessionService"]["@odata.id"]
- response = self.get_request(self.root_uri + session_service)
- if response['ret'] is False:
- return response
- data = response['data']
- sessions = data['Sessions']['@odata.id']
- if sessions[-1:] == '/':
- sessions = sessions[:-1]
- self.sessions_uri = sessions
- return {'ret': True}
-
- def _get_resource_uri_by_id(self, uris, id_prop):
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- if id_prop == data.get('Id'):
- return uri
- return None
-
- def _find_systems_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Systems' not in data:
- return {'ret': False, 'msg': "Systems resource not found"}
- response = self.get_request(self.root_uri + data['Systems']['@odata.id'])
- if response['ret'] is False:
- return response
- self.systems_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.systems_uris:
- return {
- 'ret': False,
- 'msg': "ComputerSystem's Members array is either empty or missing"}
- self.systems_uri = self.systems_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.systems_uri = self._get_resource_uri_by_id(self.systems_uris,
- self.resource_id)
- if not self.systems_uri:
- return {
- 'ret': False,
- 'msg': "System resource %s not found" % self.resource_id}
- elif len(self.systems_uris) > 1:
- self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'})
- return {'ret': True}
-
- def _find_updateservice_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'UpdateService' not in data:
- return {'ret': False, 'msg': "UpdateService resource not found"}
- else:
- update = data["UpdateService"]["@odata.id"]
- self.update_uri = update
- response = self.get_request(self.root_uri + update)
- if response['ret'] is False:
- return response
- data = response['data']
- self.firmware_uri = self.software_uri = None
- if 'FirmwareInventory' in data:
- self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
- if 'SoftwareInventory' in data:
- self.software_uri = data['SoftwareInventory'][u'@odata.id']
- return {'ret': True}
-
- def _find_chassis_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Chassis' not in data:
- return {'ret': False, 'msg': "Chassis resource not found"}
- chassis = data["Chassis"]["@odata.id"]
- response = self.get_request(self.root_uri + chassis)
- if response['ret'] is False:
- return response
- self.chassis_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.chassis_uris:
- return {'ret': False,
- 'msg': "Chassis Members array is either empty or missing"}
- self.chassis_uri = self.chassis_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris,
- self.resource_id)
- if not self.chassis_uri:
- return {
- 'ret': False,
- 'msg': "Chassis resource %s not found" % self.resource_id}
- elif len(self.chassis_uris) > 1:
- self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'})
- return {'ret': True}
-
- def _find_managers_resource(self):
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Managers' not in data:
- return {'ret': False, 'msg': "Manager resource not found"}
- manager = data["Managers"]["@odata.id"]
- response = self.get_request(self.root_uri + manager)
- if response['ret'] is False:
- return response
- self.manager_uris = [
- i['@odata.id'] for i in response['data'].get('Members', [])]
- if not self.manager_uris:
- return {'ret': False,
- 'msg': "Managers Members array is either empty or missing"}
- self.manager_uri = self.manager_uris[0]
- if self.data_modification:
- if self.resource_id:
- self.manager_uri = self._get_resource_uri_by_id(self.manager_uris,
- self.resource_id)
- if not self.manager_uri:
- return {
- 'ret': False,
- 'msg': "Manager resource %s not found" % self.resource_id}
- elif len(self.manager_uris) > 1:
- self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'})
- return {'ret': True}
-
- def _get_all_action_info_values(self, action):
- """Retrieve all parameter values for an Action from ActionInfo.
- Fall back to AllowableValue annotations if no ActionInfo found.
- Return the result in an ActionInfo-like dictionary, keyed
- by the name of the parameter. """
- ai = {}
- if '@Redfish.ActionInfo' in action:
- ai_uri = action['@Redfish.ActionInfo']
- response = self.get_request(self.root_uri + ai_uri)
- if response['ret'] is True:
- data = response['data']
- if 'Parameters' in data:
- params = data['Parameters']
- ai = dict((p['Name'], p)
- for p in params if 'Name' in p)
- if not ai:
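- # build an ActionInfo-like dict from the annotation keys; the slice
- # strips the 24-character '@Redfish.AllowableValues' suffix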
- ai = dict((k[:-24],
- {'AllowableValues': v}) for k, v in action.items()
- if k.endswith('@Redfish.AllowableValues'))
- return ai
-
- def _get_allowable_values(self, action, name, default_values=None):
- if default_values is None:
- default_values = []
- ai = self._get_all_action_info_values(action)
- allowable_values = ai.get(name, {}).get('AllowableValues')
- # fallback to default values
- if allowable_values is None:
- allowable_values = default_values
- return allowable_values
-
- def get_logs(self):
- log_svcs_uri_list = []
- list_of_logs = []
- properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat',
- 'Message', 'MessageId', 'MessageArgs']
-
- # Find LogService
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'LogServices' not in data:
- return {'ret': False, 'msg': "LogServices resource not found"}
-
- # Find all entries in LogServices
- logs_uri = data["LogServices"]["@odata.id"]
- response = self.get_request(self.root_uri + logs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- for log_svcs_entry in data.get('Members', []):
- response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
- if response['ret'] is False:
- return response
- _data = response['data']
- if 'Entries' in _data:
- log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
-
- # For each entry in LogServices, get log name and all log entries
- for log_svcs_uri in log_svcs_uri_list:
- logs = {}
- list_of_log_entries = []
- response = self.get_request(self.root_uri + log_svcs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- logs['Description'] = data.get('Description',
- 'Collection of log entries')
- # Get all log entries for each type of log found
- for logEntry in data.get('Members', []):
- entry = {}
- for prop in properties:
- if prop in logEntry:
- entry[prop] = logEntry.get(prop)
- if entry:
- list_of_log_entries.append(entry)
- log_name = log_svcs_uri.split('/')[-1]
- logs[log_name] = list_of_log_entries
- list_of_logs.append(logs)
-
- # list_of_logs[logs{list_of_log_entries[entry{}]}]
- return {'ret': True, 'entries': list_of_logs}
-
- def clear_logs(self):
- # Find LogService
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'LogServices' not in data:
- return {'ret': False, 'msg': "LogServices resource not found"}
-
- # Find all entries in LogServices
- logs_uri = data["LogServices"]["@odata.id"]
- response = self.get_request(self.root_uri + logs_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for log_svcs_entry in data[u'Members']:
- response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
- if response['ret'] is False:
- return response
- _data = response['data']
- # Check to make sure option is available, otherwise error is ugly
- if "Actions" in _data:
- if "#LogService.ClearLog" in _data[u"Actions"]:
- # capture the POST response so the check below inspects the ClearLog
- # result rather than the earlier GET response
- response = self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {})
- if response['ret'] is False:
- return response
- return {'ret': True}
-
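- # Run func once per URI in uri_list and merge the per-resource results
- # into a single {'ret': ..., 'entries': [...]} dictionary.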
- def aggregate(self, func, uri_list, uri_name):
- ret = True
- entries = []
- for uri in uri_list:
- inventory = func(uri)
- ret = inventory.pop('ret') and ret
- if 'entries' in inventory:
- entries.append(({uri_name: uri},
- inventory['entries']))
- return dict(ret=ret, entries=entries)
-
- def aggregate_chassis(self, func):
- return self.aggregate(func, self.chassis_uris, 'chassis_uri')
-
- def aggregate_managers(self, func):
- return self.aggregate(func, self.manager_uris, 'manager_uri')
-
- def aggregate_systems(self, func):
- return self.aggregate(func, self.systems_uris, 'system_uri')
-
- def get_storage_controller_inventory(self, systems_uri):
- result = {}
- controller_list = []
- controller_results = []
- # Get these entries, but do not fail if they are not found
- properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
- 'Location', 'Manufacturer', 'Model', 'Name', 'Id',
- 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
- key = "StorageControllers"
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'Storage' not in data:
- return {'ret': False, 'msg': "Storage resource not found"}
-
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data['Storage']["@odata.id"]
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- # Loop through Members and their StorageControllers
- # and gather properties from each StorageController
- if data[u'Members']:
- for storage_member in data[u'Members']:
- storage_member_uri = storage_member[u'@odata.id']
- response = self.get_request(self.root_uri + storage_member_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if key in data:
- controller_list = data[key]
- for controller in controller_list:
- controller_result = {}
- for property in properties:
- if property in controller:
- controller_result[property] = controller[property]
- controller_results.append(controller_result)
- result['entries'] = controller_results
- return result
- else:
- return {'ret': False, 'msg': "Storage Members array is either empty or missing"}
-
- def get_multi_storage_controller_inventory(self):
- return self.aggregate_systems(self.get_storage_controller_inventory)
-
- def get_disk_inventory(self, systems_uri):
- result = {'entries': []}
- controller_list = []
- # Get these entries, but do not fail if they are not found
- properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
- 'EncryptionAbility', 'EncryptionStatus',
- 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
- 'Manufacturer', 'MediaType', 'Model', 'Name',
- 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
- 'RotationSpeedRPM', 'SerialNumber', 'Status']
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'SimpleStorage' not in data and 'Storage' not in data:
- return {'ret': False, 'msg': "SimpleStorage and Storage resource \
- not found"}
-
- if 'Storage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data[u'Storage'][u'@odata.id']
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if data[u'Members']:
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- controller_name = 'Controller 1'
- if 'StorageControllers' in data:
- sc = data['StorageControllers']
- if sc:
- if 'Name' in sc[0]:
- controller_name = sc[0]['Name']
- else:
- sc_id = sc[0].get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- drive_results = []
- if 'Drives' in data:
- for device in data[u'Drives']:
- disk_uri = self.root_uri + device[u'@odata.id']
- response = self.get_request(disk_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- drive_result = {}
- for property in properties:
- if property in data:
- if data[property] is not None:
- drive_result[property] = data[property]
- drive_results.append(drive_result)
- drives = {'Controller': controller_name,
- 'Drives': drive_results}
- result["entries"].append(drives)
-
- if 'SimpleStorage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data["SimpleStorage"]["@odata.id"]
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
-
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Name' in data:
- controller_name = data['Name']
- else:
- sc_id = data.get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- drive_results = []
- for device in data[u'Devices']:
- drive_result = {}
- for property in properties:
- if property in device:
- drive_result[property] = device[property]
- drive_results.append(drive_result)
- drives = {'Controller': controller_name,
- 'Drives': drive_results}
- result["entries"].append(drives)
-
- return result
-
- def get_multi_disk_inventory(self):
- return self.aggregate_systems(self.get_disk_inventory)
-
- def get_volume_inventory(self, systems_uri):
- result = {'entries': []}
- controller_list = []
- volume_list = []
- # Get these entries, but do not fail if they are not found
- properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes',
- 'Capacity', 'CapacityBytes', 'CapacitySources',
- 'Encrypted', 'EncryptionTypes', 'Identifiers',
- 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities',
- 'AllocatedPools', 'Status']
-
- # Find Storage service
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if 'SimpleStorage' not in data and 'Storage' not in data:
- return {'ret': False, 'msg': "SimpleStorage and Storage resource \
- not found"}
-
- if 'Storage' in data:
- # Get a list of all storage controllers and build respective URIs
- storage_uri = data[u'Storage'][u'@odata.id']
- response = self.get_request(self.root_uri + storage_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if data.get('Members'):
- for controller in data[u'Members']:
- controller_list.append(controller[u'@odata.id'])
- for c in controller_list:
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
- controller_name = 'Controller 1'
- if 'StorageControllers' in data:
- sc = data['StorageControllers']
- if sc:
- if 'Name' in sc[0]:
- controller_name = sc[0]['Name']
- else:
- sc_id = sc[0].get('Id', '1')
- controller_name = 'Controller %s' % sc_id
- volume_results = []
- if 'Volumes' in data:
- # Get a list of all volumes and build respective URIs
- volumes_uri = data[u'Volumes'][u'@odata.id']
- response = self.get_request(self.root_uri + volumes_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if data.get('Members'):
- for volume in data[u'Members']:
- volume_list.append(volume[u'@odata.id'])
- for v in volume_list:
- uri = self.root_uri + v
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- volume_result = {}
- for property in properties:
- if property in data:
- if data[property] is not None:
- volume_result[property] = data[property]
-
- # Get related Drives Id
- drive_id_list = []
- if 'Links' in data:
- if 'Drives' in data[u'Links']:
- for link in data[u'Links'][u'Drives']:
- drive_id_link = link[u'@odata.id']
- drive_id = drive_id_link.split("/")[-1]
- drive_id_list.append({'Id': drive_id})
- volume_result['Linked_drives'] = drive_id_list
- volume_results.append(volume_result)
- volumes = {'Controller': controller_name,
- 'Volumes': volume_results}
- result["entries"].append(volumes)
- else:
- return {'ret': False, 'msg': "Storage Members array is either empty or missing"}
-
- return result
-
- def get_multi_volume_inventory(self):
- return self.aggregate_systems(self.get_volume_inventory)
-
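- # Map the module-level LED commands onto the Redfish IndicatorLED
- # property values and PATCH the chassis resource accordingly.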
- def manage_indicator_led(self, command):
- result = {}
- key = 'IndicatorLED'
-
- payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', 'IndicatorLedBlink': 'Blinking'}
-
- response = self.get_request(self.root_uri + self.chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- if command in payloads.keys():
- payload = {'IndicatorLED': payloads[command]}
- response = self.patch_request(self.root_uri + self.chassis_uri, payload)
- if response['ret'] is False:
- return response
- else:
- return {'ret': False, 'msg': 'Invalid command'}
-
- return result
-
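- # If the requested ResetType is not advertised by the service, try the
- # closest equivalent (e.g. On <-> ForceOn) before giving up.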
- def _map_reset_type(self, reset_type, allowable_values):
- equiv_types = {
- 'On': 'ForceOn',
- 'ForceOn': 'On',
- 'ForceOff': 'GracefulShutdown',
- 'GracefulShutdown': 'ForceOff',
- 'GracefulRestart': 'ForceRestart',
- 'ForceRestart': 'GracefulRestart'
- }
-
- if reset_type in allowable_values:
- return reset_type
- if reset_type not in equiv_types:
- return reset_type
- mapped_type = equiv_types[reset_type]
- if mapped_type in allowable_values:
- return mapped_type
- return reset_type
-
- def manage_system_power(self, command):
- return self.manage_power(command, self.systems_uri,
- '#ComputerSystem.Reset')
-
- def manage_manager_power(self, command):
- return self.manage_power(command, self.manager_uri,
- '#Manager.Reset')
-
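- # Generic power handler shared by system and manager resets: validate
- # the command, skip the request when the resource is already in the
- # target power state, then POST the mapped ResetType to the Action URI.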
- def manage_power(self, command, resource_uri, action_name):
- key = "Actions"
- reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
- 'GracefulRestart', 'ForceRestart', 'Nmi',
- 'ForceOn', 'PushPowerButton', 'PowerCycle']
-
- # command should be PowerOn, PowerForceOff, etc.
- if not command.startswith('Power'):
- return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
- reset_type = command[5:]
-
- # map Reboot to a ResetType that does a reboot
- if reset_type == 'Reboot':
- reset_type = 'GracefulRestart'
-
- if reset_type not in reset_type_values:
- return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
-
- # read the resource and get the current power state
- response = self.get_request(self.root_uri + resource_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- power_state = data.get('PowerState')
-
- # if power is already in target state, nothing to do
- if power_state == "On" and reset_type in ['On', 'ForceOn']:
- return {'ret': True, 'changed': False}
- if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
- return {'ret': True, 'changed': False}
-
- # get the reset Action and target URI
- if key not in data or action_name not in data[key]:
- return {'ret': False, 'msg': 'Action %s not found' % action_name}
- reset_action = data[key][action_name]
- if 'target' not in reset_action:
- return {'ret': False,
- 'msg': 'target URI missing from Action %s' % action_name}
- action_uri = reset_action['target']
-
- # get AllowableValues
- ai = self._get_all_action_info_values(reset_action)
- allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
-
- # map ResetType to an allowable value if needed
- if reset_type not in allowable_values:
- reset_type = self._map_reset_type(reset_type, allowable_values)
-
- # define payload
- payload = {'ResetType': reset_type}
-
- # POST to Action URI
- response = self.post_request(self.root_uri + action_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True}
-
- def _find_account_uri(self, username=None, acct_id=None):
- if not any((username, acct_id)):
- return {'ret': False, 'msg':
- 'Must provide either account_id or account_username'}
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- headers = response['headers']
- if username:
- if username == data.get('UserName'):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
- if acct_id:
- if acct_id == data.get('Id'):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
-
- return {'ret': False, 'no_match': True, 'msg':
- 'No account with the given account_id or account_username found'}
-
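- # Find a disabled account entry with an empty UserName that a new
- # account can be written into via PATCH.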
- def _find_empty_account_slot(self):
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
- if uris:
- # first slot may be reserved, so move to end of list
- uris += [uris.pop(0)]
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- data = response['data']
- headers = response['headers']
- if data.get('UserName') == "" and not data.get('Enabled', True):
- return {'ret': True, 'data': data,
- 'headers': headers, 'uri': uri}
-
- return {'ret': False, 'no_match': True, 'msg':
- 'No empty account slot found'}
-
- def list_users(self):
- result = {}
- # listing all users has always been slower than other operations; the cause is unclear
- user_list = []
- users_results = []
- # Get these entries, but do not fail if they are not found
- properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for users in data.get('Members', []):
- user_list.append(users[u'@odata.id']) # user_list[] are URIs
-
- # for each user, get details
- for uri in user_list:
- user = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- user[property] = data[property]
-
- users_results.append(user)
- result["entries"] = users_results
- return result
-
- def add_user_via_patch(self, user):
- if user.get('account_id'):
- # If Id slot specified, use it
- response = self._find_account_uri(acct_id=user.get('account_id'))
- else:
- # Otherwise find first empty slot
- response = self._find_empty_account_slot()
-
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {}
- if user.get('account_username'):
- payload['UserName'] = user.get('account_username')
- if user.get('account_password'):
- payload['Password'] = user.get('account_password')
- if user.get('account_roleid'):
- payload['RoleId'] = user.get('account_roleid')
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def add_user(self, user):
- if not user.get('account_username'):
- return {'ret': False, 'msg':
- 'Must provide account_username for AddUser command'}
-
- response = self._find_account_uri(username=user.get('account_username'))
- if response['ret']:
- # account_username already exists, nothing to do
- return {'ret': True, 'changed': False}
-
- response = self.get_request(self.root_uri + self.accounts_uri)
- if not response['ret']:
- return response
- headers = response['headers']
-
- if 'allow' in headers:
- methods = [m.strip() for m in headers.get('allow').split(',')]
- if 'POST' not in methods:
- # if Allow header present and POST not listed, add via PATCH
- return self.add_user_via_patch(user)
-
- payload = {}
- if user.get('account_username'):
- payload['UserName'] = user.get('account_username')
- if user.get('account_password'):
- payload['Password'] = user.get('account_password')
- if user.get('account_roleid'):
- payload['RoleId'] = user.get('account_roleid')
- if user.get('account_id'):
- payload['Id'] = user.get('account_id')
-
- response = self.post_request(self.root_uri + self.accounts_uri, payload)
- if not response['ret']:
- if response.get('status') == 405:
- # if POST returned a 405, try to add via PATCH
- return self.add_user_via_patch(user)
- else:
- return response
- return {'ret': True}
-
- def enable_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data.get('Enabled', True):
- # account already enabled, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'Enabled': True}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def delete_user_via_patch(self, user, uri=None, data=None):
- if not uri:
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data and data.get('UserName') == '' and not data.get('Enabled', False):
- # account UserName already cleared, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'UserName': ''}
- if data.get('Enabled', False):
- payload['Enabled'] = False
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def delete_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- if response.get('no_match'):
- # account does not exist, nothing to do
- return {'ret': True, 'changed': False}
- else:
- # some error encountered
- return response
-
- uri = response['uri']
- headers = response['headers']
- data = response['data']
-
- if 'allow' in headers:
- methods = [m.strip() for m in headers.get('allow').split(',')]
- if 'DELETE' not in methods:
- # if Allow header present and DELETE not listed, del via PATCH
- return self.delete_user_via_patch(user, uri=uri, data=data)
-
- response = self.delete_request(self.root_uri + uri)
- if not response['ret']:
- if response.get('status') == 405:
- # if DELETE returned a 405, try to delete via PATCH
- return self.delete_user_via_patch(user, uri=uri, data=data)
- else:
- return response
- return {'ret': True}
-
- def disable_user(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if not data.get('Enabled'):
- # account already disabled, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'Enabled': False}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_role(self, user):
- if not user.get('account_roleid'):
- return {'ret': False, 'msg':
- 'Must provide account_roleid for UpdateUserRole command'}
-
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- data = response['data']
-
- if data.get('RoleId') == user.get('account_roleid'):
- # account already has the requested RoleId, nothing to do
- return {'ret': True, 'changed': False}
-
- payload = {'RoleId': user.get('account_roleid')}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_password(self, user):
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {'Password': user['account_password']}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_user_name(self, user):
- if not user.get('account_updatename'):
- return {'ret': False, 'msg':
- 'Must provide account_updatename for UpdateUserName command'}
-
- response = self._find_account_uri(username=user.get('account_username'),
- acct_id=user.get('account_id'))
- if not response['ret']:
- return response
- uri = response['uri']
- payload = {'UserName': user['account_updatename']}
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True}
-
- def update_accountservice_properties(self, user):
- if user.get('account_properties') is None:
- return {'ret': False, 'msg':
- 'Must provide account_properties for UpdateAccountServiceProperties command'}
- account_properties = user.get('account_properties')
-
- # Find AccountService
- response = self.get_request(self.root_uri + self.service_root)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'AccountService' not in data:
- return {'ret': False, 'msg': "AccountService resource not found"}
- accountservice_uri = data["AccountService"]["@odata.id"]
-
- # Check support or not
- response = self.get_request(self.root_uri + accountservice_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- for property_name in account_properties.keys():
- if property_name not in data:
- return {'ret': False, 'msg':
- 'property %s not supported' % property_name}
-
- # if properties is already matched, nothing to do
- need_change = False
- for property_name in account_properties.keys():
- if account_properties[property_name] != data[property_name]:
- need_change = True
- break
-
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "AccountService properties already set"}
-
- payload = account_properties
- response = self.patch_request(self.root_uri + accountservice_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified AccountService properties"}
-
- def get_sessions(self):
- result = {}
- # listing all sessions has always been slower than other operations; the cause is unclear
- session_list = []
- sessions_results = []
- # Get these entries, but do not fail if they are not found
- properties = ['Description', 'Id', 'Name', 'UserName']
-
- response = self.get_request(self.root_uri + self.sessions_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for sessions in data[u'Members']:
- session_list.append(sessions[u'@odata.id']) # session_list[] are URIs
-
- # for each session, get details
- for uri in session_list:
- session = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- session[property] = data[property]
-
- sessions_results.append(session)
- result["entries"] = sessions_results
- return result
-
- def clear_sessions(self):
- response = self.get_request(self.root_uri + self.sessions_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- # if no active sessions, return as success
- if data['Members@odata.count'] == 0:
- return {'ret': True, 'changed': False, 'msg': "There are no active sessions"}
-
- # loop to delete every active session
- for session in data[u'Members']:
- response = self.delete_request(self.root_uri + session[u'@odata.id'])
- if response['ret'] is False:
- return response
-
- return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"}
-
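- # POST credentials to the Sessions collection; the returned
- # X-Auth-Token and session URI allow later token-based requests and
- # session deletion.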
- def create_session(self):
- if not self.creds.get('user') or not self.creds.get('pswd'):
- return {'ret': False, 'msg':
- 'Must provide the username and password parameters for '
- 'the CreateSession command'}
-
- payload = {
- 'UserName': self.creds['user'],
- 'Password': self.creds['pswd']
- }
- response = self.post_request(self.root_uri + self.sessions_uri, payload)
- if response['ret'] is False:
- return response
-
- headers = response['headers']
- if 'x-auth-token' not in headers:
- return {'ret': False, 'msg':
- 'The service did not return the X-Auth-Token header in '
- 'the response from the Sessions collection POST'}
-
- if 'location' not in headers:
- self.module.warn(
- 'The service did not return the Location header for the '
- 'session URL in the response from the Sessions collection '
- 'POST')
- session_uri = None
- else:
- session_uri = urlparse(headers.get('location')).path
-
- session = dict()
- session['token'] = headers.get('x-auth-token')
- session['uri'] = session_uri
- return {'ret': True, 'changed': True, 'session': session,
- 'msg': 'Session created successfully'}
-
- def delete_session(self, session_uri):
- if not session_uri:
- return {'ret': False, 'msg':
- 'Must provide the session_uri parameter for the '
- 'DeleteSession command'}
-
- response = self.delete_request(self.root_uri + session_uri)
- if response['ret'] is False:
- return response
-
- return {'ret': True, 'changed': True,
- 'msg': 'Session deleted successfully'}
-
- def get_firmware_update_capabilities(self):
- result = {}
- response = self.get_request(self.root_uri + self.update_uri)
- if response['ret'] is False:
- return response
-
- result['ret'] = True
-
- result['entries'] = {}
-
- data = response['data']
-
- if "Actions" in data:
- actions = data['Actions']
- if len(actions) > 0:
- for key in actions.keys():
- action = actions.get(key)
- if 'title' in action:
- title = action['title']
- else:
- title = key
- result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues',
- ["Key TransferProtocol@Redfish.AllowableValues not found"])
- else:
- return {'ret': False, 'msg': "Actions list is empty."}
- else:
- return {'ret': False, 'msg': "Key Actions not found."}
- return result
-
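- # Shared walker for FirmwareInventory and SoftwareInventory
- # collections: fetch each member and keep the standard properties.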
- def _software_inventory(self, uri):
- result = {}
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- result['entries'] = []
- for member in data[u'Members']:
- uri = self.root_uri + member[u'@odata.id']
- # Get details for each software or firmware member
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- software = {}
- # Get these standard properties if present
- for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
- 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
- 'ReleaseDate']:
- if key in data:
- software[key] = data.get(key)
- result['entries'].append(software)
- return result
-
- def get_firmware_inventory(self):
- if self.firmware_uri is None:
- return {'ret': False, 'msg': 'No FirmwareInventory resource found'}
- else:
- return self._software_inventory(self.firmware_uri)
-
- def get_software_inventory(self):
- if self.software_uri is None:
- return {'ret': False, 'msg': 'No SoftwareInventory resource found'}
- else:
- return self._software_inventory(self.software_uri)
-
- def simple_update(self, update_opts):
- image_uri = update_opts.get('update_image_uri')
- protocol = update_opts.get('update_protocol')
- targets = update_opts.get('update_targets')
- creds = update_opts.get('update_creds')
-
- if not image_uri:
- return {'ret': False, 'msg':
- 'Must specify update_image_uri for the SimpleUpdate command'}
-
- response = self.get_request(self.root_uri + self.update_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'Actions' not in data:
- return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
- if '#UpdateService.SimpleUpdate' not in data['Actions']:
- return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
- action = data['Actions']['#UpdateService.SimpleUpdate']
- if 'target' not in action:
- return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
- update_uri = action['target']
- if protocol:
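- # NOTE: 'NSF' looks like a historical typo for 'NFS'; both values
- # are kept here, presumably for backward compatibility.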
- default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF',
- 'SCP', 'TFTP', 'OEM', 'NFS']
- allowable_values = self._get_allowable_values(action,
- 'TransferProtocol',
- default_values)
- if protocol not in allowable_values:
- return {'ret': False,
- 'msg': 'Specified update_protocol (%s) not supported '
- 'by service. Supported protocols: %s' %
- (protocol, allowable_values)}
- if targets:
- allowable_values = self._get_allowable_values(action, 'Targets')
- if allowable_values:
- for target in targets:
- if target not in allowable_values:
- return {'ret': False,
- 'msg': 'Specified target (%s) not supported '
- 'by service. Supported targets: %s' %
- (target, allowable_values)}
-
- payload = {
- 'ImageURI': image_uri
- }
- if protocol:
- payload["TransferProtocol"] = protocol
- if targets:
- payload["Targets"] = targets
- if creds:
- if creds.get('username'):
- payload["Username"] = creds.get('username')
- if creds.get('password'):
- payload["Password"] = creds.get('password')
- response = self.post_request(self.root_uri + update_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True,
- 'msg': "SimpleUpdate requested"}
-
- def get_bios_attributes(self, systems_uri):
- result = {}
- bios_attributes = {}
- key = "Bios"
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- bios_uri = data[key]["@odata.id"]
-
- response = self.get_request(self.root_uri + bios_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- for attribute in data[u'Attributes'].items():
- bios_attributes[attribute[0]] = attribute[1]
- result["entries"] = bios_attributes
- return result
-
- def get_multi_bios_attributes(self):
- return self.aggregate_systems(self.get_bios_attributes)
-
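- # Build a dict of boot option details keyed by BootOptionReference so
- # that BootOrder entries can be resolved to display names.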
- def _get_boot_options_dict(self, boot):
- # Get these entries from BootOption, if present
- properties = ['DisplayName', 'BootOptionReference']
-
- # Retrieve BootOptions if present
- if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']:
- boot_options_uri = boot['BootOptions']["@odata.id"]
- # Get BootOptions resource
- response = self.get_request(self.root_uri + boot_options_uri)
- if response['ret'] is False:
- return {}
- data = response['data']
-
- # Retrieve Members array
- if 'Members' not in data:
- return {}
- members = data['Members']
- else:
- members = []
-
- # Build dict of BootOptions keyed by BootOptionReference
- boot_options_dict = {}
- for member in members:
- if '@odata.id' not in member:
- return {}
- boot_option_uri = member['@odata.id']
- response = self.get_request(self.root_uri + boot_option_uri)
- if response['ret'] is False:
- return {}
- data = response['data']
- if 'BootOptionReference' not in data:
- return {}
- boot_option_ref = data['BootOptionReference']
-
- # fetch the props to display for this boot device
- boot_props = {}
- for prop in properties:
- if prop in data:
- boot_props[prop] = data[prop]
-
- boot_options_dict[boot_option_ref] = boot_props
-
- return boot_options_dict
-
- def get_boot_order(self, systems_uri):
- result = {}
-
- # Retrieve System resource
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- # Confirm needed Boot properties are present
- if 'Boot' not in data or 'BootOrder' not in data['Boot']:
- return {'ret': False, 'msg': "Key BootOrder not found"}
-
- boot = data['Boot']
- boot_order = boot['BootOrder']
- boot_options_dict = self._get_boot_options_dict(boot)
-
- # Build boot device list
- boot_device_list = []
- for ref in boot_order:
- boot_device_list.append(
- boot_options_dict.get(ref, {'BootOptionReference': ref}))
-
- result["entries"] = boot_device_list
- return result
-
- def get_multi_boot_order(self):
- return self.aggregate_systems(self.get_boot_order)
-
- def get_boot_override(self, systems_uri):
- result = {}
-
- properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget",
- "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"]
-
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if 'Boot' not in data:
- return {'ret': False, 'msg': "Key Boot not found"}
-
- boot = data['Boot']
-
- boot_overrides = {}
- if "BootSourceOverrideEnabled" in boot:
- if boot["BootSourceOverrideEnabled"] is not False:
- for property in properties:
- if property in boot:
- if boot[property] is not None:
- boot_overrides[property] = boot[property]
- else:
- return {'ret': False, 'msg': "No boot override is enabled."}
-
- result['entries'] = boot_overrides
- return result
-
- def get_multi_boot_override(self):
- return self.aggregate_systems(self.get_boot_override)
-
- def set_bios_default_settings(self):
- result = {}
- key = "Bios"
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + self.systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- bios_uri = data[key]["@odata.id"]
-
- # Extract proper URI
- response = self.get_request(self.root_uri + bios_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- reset_bios_settings_uri = data["Actions"]["#Bios.ResetBios"]["target"]
-
- response = self.post_request(self.root_uri + reset_bios_settings_uri, {})
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Set BIOS to default settings"}
-
- def set_boot_override(self, boot_opts):
- result = {}
- key = "Boot"
-
- bootdevice = boot_opts.get('bootdevice')
- uefi_target = boot_opts.get('uefi_target')
- boot_next = boot_opts.get('boot_next')
- override_enabled = boot_opts.get('override_enabled')
- boot_override_mode = boot_opts.get('boot_override_mode')
-
- if not bootdevice and override_enabled != 'Disabled':
- return {'ret': False,
- 'msg': "bootdevice option required for temporary boot override"}
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + self.systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- boot = data[key]
-
- if override_enabled != 'Disabled':
- annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues'
- if annotation in boot:
- allowable_values = boot[annotation]
- if isinstance(allowable_values, list) and bootdevice not in allowable_values:
- return {'ret': False,
- 'msg': "Boot device %s not in list of allowable values (%s)" %
- (bootdevice, allowable_values)}
-
- # read existing values
- cur_enabled = boot.get('BootSourceOverrideEnabled')
- target = boot.get('BootSourceOverrideTarget')
- cur_uefi_target = boot.get('UefiTargetBootSourceOverride')
- cur_boot_next = boot.get('BootNext')
- cur_override_mode = boot.get('BootSourceOverrideMode')
-
- if override_enabled == 'Disabled':
- payload = {
- 'Boot': {
- 'BootSourceOverrideEnabled': override_enabled
- }
- }
- elif bootdevice == 'UefiTarget':
- if not uefi_target:
- return {'ret': False,
- 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
- if override_enabled == cur_enabled and target == bootdevice and uefi_target == cur_uefi_target:
- # If properties are already set, no changes needed
- return {'ret': True, 'changed': False}
- payload = {
- 'Boot': {
- 'BootSourceOverrideEnabled': override_enabled,
- 'BootSourceOverrideTarget': bootdevice,
- 'UefiTargetBootSourceOverride': uefi_target
- }
- }
- elif bootdevice == 'UefiBootNext':
- if not boot_next:
- return {'ret': False,
- 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
- if cur_enabled == override_enabled and target == bootdevice and boot_next == cur_boot_next:
- # If properties are already set, no changes needed
- return {'ret': True, 'changed': False}
- payload = {
- 'Boot': {
- 'BootSourceOverrideEnabled': override_enabled,
- 'BootSourceOverrideTarget': bootdevice,
- 'BootNext': boot_next
- }
- }
- else:
- if (cur_enabled == override_enabled and target == bootdevice and
- (cur_override_mode == boot_override_mode or not boot_override_mode)):
- # If properties are already set, no changes needed
- return {'ret': True, 'changed': False}
- payload = {
- 'Boot': {
- 'BootSourceOverrideEnabled': override_enabled,
- 'BootSourceOverrideTarget': bootdevice
- }
- }
- if boot_override_mode:
- payload['Boot']['BootSourceOverrideMode'] = boot_override_mode
-
- response = self.patch_request(self.root_uri + self.systems_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True}
-
- def set_bios_attributes(self, attributes):
- result = {}
- key = "Bios"
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + self.systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- bios_uri = data[key]["@odata.id"]
-
- # Extract proper URI
- response = self.get_request(self.root_uri + bios_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- # Make a copy of the attributes dict
- attrs_to_patch = dict(attributes)
- # List to hold attributes not found
- attrs_bad = {}
-
- # Check the attributes
- for attr_name, attr_value in attributes.items():
- # Check if attribute exists
- if attr_name not in data[u'Attributes']:
- # Remove and proceed to next attribute if this isn't valid
- attrs_bad.update({attr_name: attr_value})
- del attrs_to_patch[attr_name]
- continue
-
- # If already set to requested value, remove it from PATCH payload
- if data[u'Attributes'][attr_name] == attributes[attr_name]:
- del attrs_to_patch[attr_name]
-
- warning = ""
- if attrs_bad:
- warning = "Incorrect attributes %s" % (attrs_bad)
-
- # Return success w/ changed=False if no attrs need to be changed
- if not attrs_to_patch:
- return {'ret': True, 'changed': False,
- 'msg': "BIOS attributes already set",
- 'warning': warning}
-
- # Get the SettingsObject URI
- set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"]["@odata.id"]
-
- # Construct payload and issue PATCH command
- payload = {"Attributes": attrs_to_patch}
- response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True,
- 'msg': "Modified BIOS attributes %s" % (attrs_to_patch),
- 'warning': warning}
-
- def set_boot_order(self, boot_list):
- if not boot_list:
- return {'ret': False,
- 'msg': "boot_order list required for SetBootOrder command"}
-
- systems_uri = self.systems_uri
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- # Confirm needed Boot properties are present
- if 'Boot' not in data or 'BootOrder' not in data['Boot']:
- return {'ret': False, 'msg': "Key BootOrder not found"}
-
- boot = data['Boot']
- boot_order = boot['BootOrder']
- boot_options_dict = self._get_boot_options_dict(boot)
-
- # validate boot_list against BootOptionReferences if available
- if boot_options_dict:
- boot_option_references = boot_options_dict.keys()
- for ref in boot_list:
- if ref not in boot_option_references:
- return {'ret': False,
- 'msg': "BootOptionReference %s not found in BootOptions" % ref}
-
- # If requested BootOrder is already set, nothing to do
- if boot_order == boot_list:
- return {'ret': True, 'changed': False,
- 'msg': "BootOrder already set to %s" % boot_list}
-
- payload = {
- 'Boot': {
- 'BootOrder': boot_list
- }
- }
- response = self.patch_request(self.root_uri + systems_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "BootOrder set"}
-
- def set_default_boot_order(self):
- systems_uri = self.systems_uri
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- # get the #ComputerSystem.SetDefaultBootOrder Action and target URI
- action = '#ComputerSystem.SetDefaultBootOrder'
- if 'Actions' not in data or action not in data['Actions']:
- return {'ret': False, 'msg': 'Action %s not found' % action}
- if 'target' not in data['Actions'][action]:
- return {'ret': False,
- 'msg': 'target URI missing from Action %s' % action}
- action_uri = data['Actions'][action]['target']
-
- # POST to Action URI
- payload = {}
- response = self.post_request(self.root_uri + action_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True,
- 'msg': "BootOrder set to default"}
-
- def get_chassis_inventory(self):
- result = {}
- chassis_results = []
-
- # Get these entries, but do not fail if they are not found
- properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag',
- 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model']
-
- # Go through list
- for chassis_uri in self.chassis_uris:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- chassis_result = {}
- for property in properties:
- if property in data:
- chassis_result[property] = data[property]
- chassis_results.append(chassis_result)
-
- result["entries"] = chassis_results
- return result
-
- def get_fan_inventory(self):
- result = {}
- fan_results = []
- key = "Thermal"
- # Get these entries, but do not fail if they are not found
- properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status']
-
- # Go through list
- for chassis_uri in self.chassis_uris:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key in data:
- # match: found an entry for "Thermal" information = fans
- thermal_uri = data[key]["@odata.id"]
- response = self.get_request(self.root_uri + thermal_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- # Checking if fans are present
- if u'Fans' in data:
- for device in data[u'Fans']:
- fan = {}
- for property in properties:
- if property in device:
- fan[property] = device[property]
- fan_results.append(fan)
- else:
- return {'ret': False, 'msg': "No Fans present"}
- result["entries"] = fan_results
- return result
-
- def get_chassis_power(self):
- result = {}
- key = "Power"
-
- # Get these entries, but do not fail if they are not found
- properties = ['Name', 'PowerAllocatedWatts',
- 'PowerAvailableWatts', 'PowerCapacityWatts',
- 'PowerConsumedWatts', 'PowerMetrics',
- 'PowerRequestedWatts', 'RelatedItem', 'Status']
-
- chassis_power_results = []
- # Go through list
- for chassis_uri in self.chassis_uris:
- chassis_power_result = {}
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key in data:
- response = self.get_request(self.root_uri + data[key]['@odata.id'])
- if response['ret'] is False:
- return response
- data = response['data']
- if 'PowerControl' in data:
- if len(data['PowerControl']) > 0:
- data = data['PowerControl'][0]
- for property in properties:
- if property in data:
- chassis_power_result[property] = data[property]
- else:
- return {'ret': False, 'msg': 'Key PowerControl not found.'}
- chassis_power_results.append(chassis_power_result)
- else:
- return {'ret': False, 'msg': 'Key Power not found.'}
-
- result['entries'] = chassis_power_results
- return result
-
- def get_chassis_thermals(self):
- result = {}
- sensors = []
- key = "Thermal"
-
- # Get these entries, but do not fail if they are not found
- properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical',
- 'UpperThresholdFatal', 'UpperThresholdNonCritical',
- 'LowerThresholdCritical', 'LowerThresholdFatal',
- 'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
- 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
- 'SensorNumber', 'Status']
-
- # Go through list
- for chassis_uri in self.chassis_uris:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if key in data:
- thermal_uri = data[key]["@odata.id"]
- response = self.get_request(self.root_uri + thermal_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- if "Temperatures" in data:
- for sensor in data[u'Temperatures']:
- sensor_result = {}
- for property in properties:
- if property in sensor:
- if sensor[property] is not None:
- sensor_result[property] = sensor[property]
- sensors.append(sensor_result)
-
- # sensors is initialised to an empty list and never None, so test
- # for emptiness instead
- if not sensors:
- return {'ret': False, 'msg': 'Key Temperatures was not found.'}
-
- result['entries'] = sensors
- return result
-
- def get_cpu_inventory(self, systems_uri):
- result = {}
- cpu_list = []
- cpu_results = []
- key = "Processors"
- # Get these entries, but do not fail if they are not found
- properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz',
- 'TotalCores', 'TotalThreads', 'Status']
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- processors_uri = data[key]["@odata.id"]
-
- # Get a list of all CPUs and build respective URIs
- response = self.get_request(self.root_uri + processors_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for cpu in data[u'Members']:
- cpu_list.append(cpu[u'@odata.id'])
-
- for c in cpu_list:
- cpu = {}
- uri = self.root_uri + c
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- cpu[property] = data[property]
-
- cpu_results.append(cpu)
- result["entries"] = cpu_results
- return result
-
- def get_multi_cpu_inventory(self):
- return self.aggregate_systems(self.get_cpu_inventory)
-
- def get_memory_inventory(self, systems_uri):
- result = {}
- memory_list = []
- memory_results = []
- key = "Memory"
- # Get these entries, but do not fail if they are not found
- properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber',
- 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name']
-
- # Search for 'key' entry and extract URI from it
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- memory_uri = data[key]["@odata.id"]
-
- # Get a list of all DIMMs and build respective URIs
- response = self.get_request(self.root_uri + memory_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for dimm in data[u'Members']:
- memory_list.append(dimm[u'@odata.id'])
-
- for m in memory_list:
- dimm = {}
- uri = self.root_uri + m
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- if "Status" in data:
- if "State" in data["Status"]:
- if data["Status"]["State"] == "Absent":
- continue
- else:
- continue
-
- for property in properties:
- if property in data:
- dimm[property] = data[property]
-
- memory_results.append(dimm)
- result["entries"] = memory_results
- return result
-
- def get_multi_memory_inventory(self):
- return self.aggregate_systems(self.get_memory_inventory)
-
- def get_nic(self, resource_uri):
- result = {}
- properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
- 'NameServers', 'MACAddress', 'PermanentMACAddress',
- 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
- response = self.get_request(self.root_uri + resource_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- nic = {}
- for property in properties:
- if property in data:
- nic[property] = data[property]
- result['entries'] = nic
- return result
-
- def get_nic_inventory(self, resource_uri):
- result = {}
- nic_list = []
- nic_results = []
- key = "EthernetInterfaces"
-
- response = self.get_request(self.root_uri + resource_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- ethernetinterfaces_uri = data[key]["@odata.id"]
-
- # Get a list of all network controllers and build respective URIs
- response = self.get_request(self.root_uri + ethernetinterfaces_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for nic in data[u'Members']:
- nic_list.append(nic[u'@odata.id'])
-
- for n in nic_list:
- nic = self.get_nic(n)
- if nic['ret']:
- nic_results.append(nic['entries'])
- result["entries"] = nic_results
- return result
-
- def get_multi_nic_inventory(self, resource_type):
- ret = True
- entries = []
-
- # Given resource_type, use the proper URI
- if resource_type == 'Systems':
- resource_uris = self.systems_uris
- elif resource_type == 'Manager':
- resource_uris = self.manager_uris
- else:
- # guard against an unknown resource_type, which would otherwise
- # leave resource_uris undefined and raise a NameError below
- return {'ret': False, 'msg': "Invalid resource_type: %s" % resource_type}
-
- for resource_uri in resource_uris:
- inventory = self.get_nic_inventory(resource_uri)
- ret = inventory.pop('ret') and ret
- if 'entries' in inventory:
- entries.append(({'resource_uri': resource_uri},
- inventory['entries']))
- return dict(ret=ret, entries=entries)
-
- def get_virtualmedia(self, resource_uri):
- result = {}
- virtualmedia_list = []
- virtualmedia_results = []
- key = "VirtualMedia"
- # Get these entries, but do not fail if they are not found
- properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes',
- 'Image', 'ImageName', 'Name', 'WriteProtected',
- 'TransferMethod', 'TransferProtocolType']
-
- response = self.get_request(self.root_uri + resource_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- virtualmedia_uri = data[key]["@odata.id"]
-
- # Get a list of all virtual media and build respective URIs
- response = self.get_request(self.root_uri + virtualmedia_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for virtualmedia in data[u'Members']:
- virtualmedia_list.append(virtualmedia[u'@odata.id'])
-
- for n in virtualmedia_list:
- virtualmedia = {}
- uri = self.root_uri + n
- response = self.get_request(uri)
- if response['ret'] is False:
- return response
- data = response['data']
-
- for property in properties:
- if property in data:
- virtualmedia[property] = data[property]
-
- virtualmedia_results.append(virtualmedia)
- result["entries"] = virtualmedia_results
- return result
-
- def get_multi_virtualmedia(self):
- ret = True
- entries = []
-
- resource_uris = self.manager_uris
-
- for resource_uri in resource_uris:
- virtualmedia = self.get_virtualmedia(resource_uri)
- ret = virtualmedia.pop('ret') and ret
- if 'entries' in virtualmedia:
- entries.append(({'resource_uri': resource_uri},
- virtualmedia['entries']))
- return dict(ret=ret, entries=entries)
-
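- # Pick a free virtual-media slot: optionally require a MediaTypes
- # match, and treat a slot as empty when nothing is inserted and
- # ImageName is cleared.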
- @staticmethod
- def _find_empty_virt_media_slot(resources, media_types,
- media_match_strict=True):
- for uri, data in resources.items():
- # check MediaTypes
- if 'MediaTypes' in data and media_types:
- if not set(media_types).intersection(set(data['MediaTypes'])):
- continue
- else:
- if media_match_strict:
- continue
- # if ejected, 'Inserted' should be False and 'ImageName' cleared
- if (not data.get('Inserted', False) and
- not data.get('ImageName')):
- return uri, data
- return None, None
-
- @staticmethod
- def _virt_media_image_inserted(resources, image_url):
- for uri, data in resources.items():
- if data.get('Image'):
- if urlparse(image_url) == urlparse(data.get('Image')):
- if data.get('Inserted', False) and data.get('ImageName'):
- return True
- return False
-
- @staticmethod
- def _find_virt_media_to_eject(resources, image_url):
- matched_uri, matched_data = None, None
- for uri, data in resources.items():
- if data.get('Image'):
- if urlparse(image_url) == urlparse(data.get('Image')):
- matched_uri, matched_data = uri, data
- if data.get('Inserted', True) and data.get('ImageName', 'x'):
- return uri, data, True
- return matched_uri, matched_data, False
-
- def _read_virt_media_resources(self, uri_list):
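-        # Added note (not in the original): GETs that fail are skipped rather
-        # than propagated, so the returned dicts may cover only a subset of
-        # uri_list.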
- resources = {}
- headers = {}
- for uri in uri_list:
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- continue
- resources[uri] = response['data']
- headers[uri] = response['headers']
- return resources, headers
-
- @staticmethod
- def _insert_virt_media_payload(options, param_map, data, ai):
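-        # Added note (not in the original): param_map maps Redfish property
-        # names to module option names; an option is copied into the payload
-        # only if the property exists on the resource and its value passes any
-        # advertised AllowableValues.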
- payload = {
- 'Image': options.get('image_url')
- }
- for param, option in param_map.items():
- if options.get(option) is not None and param in data:
- allowable = ai.get(param, {}).get('AllowableValues', [])
- if allowable and options.get(option) not in allowable:
- return {'ret': False,
- 'msg': "Value '%s' specified for option '%s' not "
- "in list of AllowableValues %s" % (
- options.get(option), option,
- allowable)}
- payload[param] = options.get(option)
- return payload
-
- def virtual_media_insert_via_patch(self, options, param_map, uri, data):
-        # get AllowableValues (k[:-24] strips the '@Redfish.AllowableValues' suffix)
- ai = dict((k[:-24],
- {'AllowableValues': v}) for k, v in data.items()
- if k.endswith('@Redfish.AllowableValues'))
- # construct payload
- payload = self._insert_virt_media_payload(options, param_map, data, ai)
- if 'Inserted' not in payload:
- payload['Inserted'] = True
- # PATCH the resource
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
-
- def virtual_media_insert(self, options):
- param_map = {
- 'Inserted': 'inserted',
- 'WriteProtected': 'write_protected',
- 'UserName': 'username',
- 'Password': 'password',
- 'TransferProtocolType': 'transfer_protocol_type',
- 'TransferMethod': 'transfer_method'
- }
- image_url = options.get('image_url')
- if not image_url:
- return {'ret': False,
- 'msg': "image_url option required for VirtualMediaInsert"}
- media_types = options.get('media_types')
-
- # locate and read the VirtualMedia resources
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'VirtualMedia' not in data:
- return {'ret': False, 'msg': "VirtualMedia resource not found"}
- virt_media_uri = data["VirtualMedia"]["@odata.id"]
- response = self.get_request(self.root_uri + virt_media_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- virt_media_list = []
- for member in data[u'Members']:
- virt_media_list.append(member[u'@odata.id'])
- resources, headers = self._read_virt_media_resources(virt_media_list)
-
- # see if image already inserted; if so, nothing to do
- if self._virt_media_image_inserted(resources, image_url):
- return {'ret': True, 'changed': False,
- 'msg': "VirtualMedia '%s' already inserted" % image_url}
-
- # find an empty slot to insert the media
- # try first with strict media_type matching
- uri, data = self._find_empty_virt_media_slot(
- resources, media_types, media_match_strict=True)
- if not uri:
- # if not found, try without strict media_type matching
- uri, data = self._find_empty_virt_media_slot(
- resources, media_types, media_match_strict=False)
- if not uri:
- return {'ret': False,
- 'msg': "Unable to find an available VirtualMedia resource "
- "%s" % ('supporting ' + str(media_types)
- if media_types else '')}
-
- # confirm InsertMedia action found
- if ('Actions' not in data or
- '#VirtualMedia.InsertMedia' not in data['Actions']):
- # try to insert via PATCH if no InsertMedia action found
- h = headers[uri]
- if 'allow' in h:
- methods = [m.strip() for m in h.get('allow').split(',')]
- if 'PATCH' not in methods:
- # if Allow header present and PATCH missing, return error
- return {'ret': False,
- 'msg': "%s action not found and PATCH not allowed"
- % '#VirtualMedia.InsertMedia'}
- return self.virtual_media_insert_via_patch(options, param_map,
- uri, data)
-
- # get the action property
- action = data['Actions']['#VirtualMedia.InsertMedia']
- if 'target' not in action:
- return {'ret': False,
- 'msg': "target URI missing from Action "
- "#VirtualMedia.InsertMedia"}
- action_uri = action['target']
- # get ActionInfo or AllowableValues
- ai = self._get_all_action_info_values(action)
- # construct payload
- payload = self._insert_virt_media_payload(options, param_map, data, ai)
- # POST to action
- response = self.post_request(self.root_uri + action_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
-
- def virtual_media_eject_via_patch(self, uri):
- # construct payload
- payload = {
- 'Inserted': False,
- 'Image': None
- }
- # PATCH resource
- response = self.patch_request(self.root_uri + uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True,
- 'msg': "VirtualMedia ejected"}
-
- def virtual_media_eject(self, options):
- image_url = options.get('image_url')
- if not image_url:
- return {'ret': False,
- 'msg': "image_url option required for VirtualMediaEject"}
-
- # locate and read the VirtualMedia resources
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'VirtualMedia' not in data:
- return {'ret': False, 'msg': "VirtualMedia resource not found"}
- virt_media_uri = data["VirtualMedia"]["@odata.id"]
- response = self.get_request(self.root_uri + virt_media_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- virt_media_list = []
- for member in data[u'Members']:
- virt_media_list.append(member[u'@odata.id'])
- resources, headers = self._read_virt_media_resources(virt_media_list)
-
- # find the VirtualMedia resource to eject
- uri, data, eject = self._find_virt_media_to_eject(resources, image_url)
- if uri and eject:
- if ('Actions' not in data or
- '#VirtualMedia.EjectMedia' not in data['Actions']):
- # try to eject via PATCH if no EjectMedia action found
- h = headers[uri]
- if 'allow' in h:
- methods = [m.strip() for m in h.get('allow').split(',')]
- if 'PATCH' not in methods:
- # if Allow header present and PATCH missing, return error
- return {'ret': False,
- 'msg': "%s action not found and PATCH not allowed"
- % '#VirtualMedia.EjectMedia'}
- return self.virtual_media_eject_via_patch(uri)
- else:
- # POST to the EjectMedia Action
- action = data['Actions']['#VirtualMedia.EjectMedia']
- if 'target' not in action:
- return {'ret': False,
- 'msg': "target URI property missing from Action "
- "#VirtualMedia.EjectMedia"}
- action_uri = action['target']
- # empty payload for Eject action
- payload = {}
- # POST to action
- response = self.post_request(self.root_uri + action_uri,
- payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True,
- 'msg': "VirtualMedia ejected"}
- elif uri and not eject:
- # already ejected: return success but changed=False
- return {'ret': True, 'changed': False,
- 'msg': "VirtualMedia image '%s' already ejected" %
- image_url}
- else:
- # return failure (no resources matching image_url found)
- return {'ret': False, 'changed': False,
- 'msg': "No VirtualMedia resource found with image '%s' "
- "inserted" % image_url}
-
- def get_psu_inventory(self):
- result = {}
- psu_list = []
- psu_results = []
- key = "PowerSupplies"
-        # Get these entries, but do not fail if they are not found
- properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer',
- 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType',
- 'Status']
-
- # Get a list of all Chassis and build URIs, then get all PowerSupplies
- # from each Power entry in the Chassis
- chassis_uri_list = self.chassis_uris
- for chassis_uri in chassis_uri_list:
- response = self.get_request(self.root_uri + chassis_uri)
- if response['ret'] is False:
- return response
-
- result['ret'] = True
- data = response['data']
-
- if 'Power' in data:
- power_uri = data[u'Power'][u'@odata.id']
- else:
- continue
-
-            response = self.get_request(self.root_uri + power_uri)
-            if response['ret'] is False:
-                return response
-            data = response['data']
-
- if key not in data:
- return {'ret': False, 'msg': "Key %s not found" % key}
-
- psu_list = data[key]
- for psu in psu_list:
- psu_not_present = False
- psu_data = {}
- for property in properties:
- if property in psu:
- if psu[property] is not None:
- if property == 'Status':
- if 'State' in psu[property]:
- if psu[property]['State'] == 'Absent':
- psu_not_present = True
- psu_data[property] = psu[property]
- if psu_not_present:
- continue
- psu_results.append(psu_data)
-
- result["entries"] = psu_results
- if not result["entries"]:
- return {'ret': False, 'msg': "No PowerSupply objects found"}
- return result
-
- def get_multi_psu_inventory(self):
- return self.aggregate_systems(self.get_psu_inventory)
-
- def get_system_inventory(self, systems_uri):
- result = {}
- inventory = {}
-        # Get these entries, but do not fail if they are not found
- properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
- 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
- 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
- 'ProcessorSummary', 'TrustedModules', 'Name', 'Id']
-
- response = self.get_request(self.root_uri + systems_uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
-
- for property in properties:
- if property in data:
- inventory[property] = data[property]
-
- result["entries"] = inventory
- return result
-
- def get_multi_system_inventory(self):
- return self.aggregate_systems(self.get_system_inventory)
-
- def get_network_protocols(self):
- result = {}
- service_result = {}
- # Find NetworkProtocol
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'NetworkProtocol' not in data:
- return {'ret': False, 'msg': "NetworkProtocol resource not found"}
- networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
-
- response = self.get_request(self.root_uri + networkprotocol_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
- 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
- 'RFB']
- for protocol_service in protocol_services:
- if protocol_service in data.keys():
- service_result[protocol_service] = data[protocol_service]
-
- result['ret'] = True
- result["entries"] = service_result
- return result
-
- def set_network_protocols(self, manager_services):
- # Check input data validity
- protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
- 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
- 'RFB']
- protocol_state_onlist = ['true', 'True', True, 'on', 1]
- protocol_state_offlist = ['false', 'False', False, 'off', 0]
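-        # Illustration (added): {'SSH': {'ProtocolEnabled': 'true', 'Port': '22'}}
-        # is normalized below to {'SSH': {'ProtocolEnabled': True, 'Port': 22}}.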
- payload = {}
- for service_name in manager_services.keys():
- if service_name not in protocol_services:
- return {'ret': False, 'msg': "Service name %s is invalid" % service_name}
- payload[service_name] = {}
- for service_property in manager_services[service_name].keys():
- value = manager_services[service_name][service_property]
- if service_property in ['ProtocolEnabled', 'protocolenabled']:
- if value in protocol_state_onlist:
- payload[service_name]['ProtocolEnabled'] = True
- elif value in protocol_state_offlist:
- payload[service_name]['ProtocolEnabled'] = False
- else:
- return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
- elif service_property in ['port', 'Port']:
- if isinstance(value, int):
- payload[service_name]['Port'] = value
- elif isinstance(value, str) and value.isdigit():
- payload[service_name]['Port'] = int(value)
- else:
- return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
- else:
- payload[service_name][service_property] = value
-
- # Find NetworkProtocol
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'NetworkProtocol' not in data:
- return {'ret': False, 'msg': "NetworkProtocol resource not found"}
- networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
-
- # Check service property support or not
- response = self.get_request(self.root_uri + networkprotocol_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- for service_name in payload.keys():
- if service_name not in data:
- return {'ret': False, 'msg': "%s service not supported" % service_name}
- for service_property in payload[service_name].keys():
- if service_property not in data[service_name]:
- return {'ret': False, 'msg': "%s property for %s service not supported" % (service_property, service_name)}
-
- # if the protocol is already set, nothing to do
- need_change = False
- for service_name in payload.keys():
- for service_property in payload[service_name].keys():
- value = payload[service_name][service_property]
- if value != data[service_name][service_property]:
- need_change = True
- break
-
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "Manager NetworkProtocol services already set"}
-
- response = self.patch_request(self.root_uri + networkprotocol_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified Manager NetworkProtocol services"}
-
- @staticmethod
- def to_singular(resource_name):
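-        # Illustration (added): singularization is purely suffix-based, e.g.
-        # 'PowerSupplies' -> 'PowerSupply' and 'Processors' -> 'Processor';
-        # irregular plurals are not handled.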
- if resource_name.endswith('ies'):
- resource_name = resource_name[:-3] + 'y'
- elif resource_name.endswith('s'):
- resource_name = resource_name[:-1]
- return resource_name
-
- def get_health_resource(self, subsystem, uri, health, expanded):
- status = 'Status'
-
- if expanded:
- d = expanded
- else:
- r = self.get_request(self.root_uri + uri)
- if r.get('ret'):
- d = r.get('data')
- else:
- return
-
- if 'Members' in d: # collections case
- for m in d.get('Members'):
- u = m.get('@odata.id')
- r = self.get_request(self.root_uri + u)
- if r.get('ret'):
- p = r.get('data')
- if p:
- e = {self.to_singular(subsystem.lower()) + '_uri': u,
- status: p.get(status,
- "Status not available")}
- health[subsystem].append(e)
- else: # non-collections case
- e = {self.to_singular(subsystem.lower()) + '_uri': uri,
- status: d.get(status,
- "Status not available")}
- health[subsystem].append(e)
-
- def get_health_subsystem(self, subsystem, data, health):
- if subsystem in data:
- sub = data.get(subsystem)
- if isinstance(sub, list):
- for r in sub:
- if '@odata.id' in r:
- uri = r.get('@odata.id')
- expanded = None
- if '#' in uri and len(r) > 1:
- expanded = r
- self.get_health_resource(subsystem, uri, health, expanded)
- elif isinstance(sub, dict):
- if '@odata.id' in sub:
- uri = sub.get('@odata.id')
- self.get_health_resource(subsystem, uri, health, None)
- elif 'Members' in data:
- for m in data.get('Members'):
- u = m.get('@odata.id')
- r = self.get_request(self.root_uri + u)
- if r.get('ret'):
- d = r.get('data')
- self.get_health_subsystem(subsystem, d, health)
-
- def get_health_report(self, category, uri, subsystems):
- result = {}
- health = {}
- status = 'Status'
-
- # Get health status of top level resource
- response = self.get_request(self.root_uri + uri)
- if response['ret'] is False:
- return response
- result['ret'] = True
- data = response['data']
- health[category] = {status: data.get(status, "Status not available")}
-
- # Get health status of subsystems
- for sub in subsystems:
- d = None
- if sub.startswith('Links.'): # ex: Links.PCIeDevices
- sub = sub[len('Links.'):]
- d = data.get('Links', {})
- elif '.' in sub: # ex: Thermal.Fans
- p, sub = sub.split('.')
- u = data.get(p, {}).get('@odata.id')
- if u:
- r = self.get_request(self.root_uri + u)
- if r['ret']:
- d = r['data']
- if not d:
- continue
- else: # ex: Memory
- d = data
- health[sub] = []
- self.get_health_subsystem(sub, d, health)
- if not health[sub]:
- del health[sub]
-
- result["entries"] = health
- return result
-
- def get_system_health_report(self, systems_uri):
- subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage',
- 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts',
- 'NetworkInterfaces.NetworkDeviceFunctions']
- return self.get_health_report('System', systems_uri, subsystems)
-
- def get_multi_system_health_report(self):
- return self.aggregate_systems(self.get_system_health_report)
-
- def get_chassis_health_report(self, chassis_uri):
- subsystems = ['Power.PowerSupplies', 'Thermal.Fans',
- 'Links.PCIeDevices']
- return self.get_health_report('Chassis', chassis_uri, subsystems)
-
- def get_multi_chassis_health_report(self):
- return self.aggregate_chassis(self.get_chassis_health_report)
-
- def get_manager_health_report(self, manager_uri):
- subsystems = []
- return self.get_health_report('Manager', manager_uri, subsystems)
-
- def get_multi_manager_health_report(self):
- return self.aggregate_managers(self.get_manager_health_report)
-
- def set_manager_nic(self, nic_addr, nic_config):
- # Get the manager ethernet interface uri
- nic_info = self.get_manager_ethernet_uri(nic_addr)
-
- if nic_info.get('nic_addr') is None:
- return nic_info
- else:
- target_ethernet_uri = nic_info['nic_addr']
- target_ethernet_current_setting = nic_info['ethernet_setting']
-
- # Convert input to payload and check validity
- payload = {}
- for property in nic_config.keys():
- value = nic_config[property]
- if property not in target_ethernet_current_setting:
- return {'ret': False, 'msg': "Property %s in nic_config is invalid" % property}
- if isinstance(value, dict):
- if isinstance(target_ethernet_current_setting[property], dict):
- payload[property] = value
- elif isinstance(target_ethernet_current_setting[property], list):
- payload[property] = list()
- payload[property].append(value)
- else:
- return {'ret': False, 'msg': "Value of property %s in nic_config is invalid" % property}
- else:
- payload[property] = value
-
-        # If nothing needs to change there is nothing to do; report any error detected
- need_change = False
- for property in payload.keys():
- set_value = payload[property]
- cur_value = target_ethernet_current_setting[property]
- # type is simple(not dict/list)
- if not isinstance(set_value, dict) and not isinstance(set_value, list):
- if set_value != cur_value:
- need_change = True
- # type is dict
- if isinstance(set_value, dict):
- for subprop in payload[property].keys():
- if subprop not in target_ethernet_current_setting[property]:
- # Not configured already; need to apply the request
- need_change = True
- break
- sub_set_value = payload[property][subprop]
- sub_cur_value = target_ethernet_current_setting[property][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
- # type is list
- if isinstance(set_value, list):
- if len(set_value) != len(cur_value):
- # if arrays are not the same len, no need to check each element
- need_change = True
- continue
- for i in range(len(set_value)):
- for subprop in payload[property][i].keys():
- if subprop not in target_ethernet_current_setting[property][i]:
- # Not configured already; need to apply the request
- need_change = True
- break
- sub_set_value = payload[property][i][subprop]
- sub_cur_value = target_ethernet_current_setting[property][i][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
-
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "Manager NIC already set"}
-
- response = self.patch_request(self.root_uri + target_ethernet_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified Manager NIC"}
-
- # A helper function to get the EthernetInterface URI
- def get_manager_ethernet_uri(self, nic_addr='null'):
- # Get EthernetInterface collection
- response = self.get_request(self.root_uri + self.manager_uri)
- if not response['ret']:
- return response
- data = response['data']
- if 'EthernetInterfaces' not in data:
- return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
- ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
- response = self.get_request(self.root_uri + ethernetinterfaces_uri)
- if not response['ret']:
- return response
- data = response['data']
- uris = [a.get('@odata.id') for a in data.get('Members', []) if
- a.get('@odata.id')]
-
- # Find target EthernetInterface
- target_ethernet_uri = None
- target_ethernet_current_setting = None
- if nic_addr == 'null':
-            # When nic_addr is not specified, fall back to the host part of root_uri
-            nic_addr = self.root_uri.split('/')[-1]
-            nic_addr = nic_addr.split(':')[0]  # strip the port if present
- for uri in uris:
- response = self.get_request(self.root_uri + uri)
- if not response['ret']:
- return response
- data = response['data']
- data_string = json.dumps(data)
- if nic_addr.lower() in data_string.lower():
- target_ethernet_uri = uri
- target_ethernet_current_setting = data
- break
-
- nic_info = {}
- nic_info['nic_addr'] = target_ethernet_uri
- nic_info['ethernet_setting'] = target_ethernet_current_setting
-
- if target_ethernet_uri is None:
- return {}
- else:
- return nic_info
-
- def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
- response = self.get_request(self.root_uri + self.manager_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- if 'HostInterfaces' not in data:
- return {'ret': False, 'msg': "HostInterfaces resource not found"}
-
- hostinterfaces_uri = data["HostInterfaces"]["@odata.id"]
- response = self.get_request(self.root_uri + hostinterfaces_uri)
- if response['ret'] is False:
- return response
- data = response['data']
- uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
- # Capture list of URIs that match a specified HostInterface resource ID
- if hostinterface_id:
- matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]]
-
- if hostinterface_id and matching_hostinterface_uris:
-            hostinterface_uri = matching_hostinterface_uris.pop()
- elif hostinterface_id and not matching_hostinterface_uris:
- return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id}
- elif len(uris) == 1:
-            hostinterface_uri = uris.pop()
- else:
- return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
-
- response = self.get_request(self.root_uri + hostinterface_uri)
- if response['ret'] is False:
- return response
- current_hostinterface_config = response['data']
- payload = {}
- for property in hostinterface_config.keys():
- value = hostinterface_config[property]
- if property not in current_hostinterface_config:
- return {'ret': False, 'msg': "Property %s in hostinterface_config is invalid" % property}
- if isinstance(value, dict):
- if isinstance(current_hostinterface_config[property], dict):
- payload[property] = value
- elif isinstance(current_hostinterface_config[property], list):
- payload[property] = list()
- payload[property].append(value)
- else:
- return {'ret': False, 'msg': "Value of property %s in hostinterface_config is invalid" % property}
- else:
- payload[property] = value
-
- need_change = False
- for property in payload.keys():
- set_value = payload[property]
- cur_value = current_hostinterface_config[property]
- if not isinstance(set_value, dict) and not isinstance(set_value, list):
- if set_value != cur_value:
- need_change = True
- if isinstance(set_value, dict):
- for subprop in payload[property].keys():
- if subprop not in current_hostinterface_config[property]:
- need_change = True
- break
- sub_set_value = payload[property][subprop]
- sub_cur_value = current_hostinterface_config[property][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
- if isinstance(set_value, list):
- if len(set_value) != len(cur_value):
- need_change = True
- continue
- for i in range(len(set_value)):
- for subprop in payload[property][i].keys():
- if subprop not in current_hostinterface_config[property][i]:
- need_change = True
- break
- sub_set_value = payload[property][i][subprop]
- sub_cur_value = current_hostinterface_config[property][i][subprop]
- if sub_set_value != sub_cur_value:
- need_change = True
- if not need_change:
- return {'ret': True, 'changed': False, 'msg': "Host Interface already configured"}
-
- response = self.patch_request(self.root_uri + hostinterface_uri, payload)
- if response['ret'] is False:
- return response
- return {'ret': True, 'changed': True, 'msg': "Modified Host Interface"}
-
- def get_hostinterfaces(self):
- result = {}
- hostinterface_results = []
- properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
- 'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
- 'AuthNoneRoleId', 'CredentialBootstrapping']
- manager_uri_list = self.manager_uris
- for manager_uri in manager_uri_list:
- response = self.get_request(self.root_uri + manager_uri)
- if response['ret'] is False:
- return response
-
- result['ret'] = True
- data = response['data']
-
- if 'HostInterfaces' in data:
- hostinterfaces_uri = data[u'HostInterfaces'][u'@odata.id']
- else:
- continue
-
-            response = self.get_request(self.root_uri + hostinterfaces_uri)
-            if response['ret'] is False:
-                return response
-            data = response['data']
-
- if 'Members' in data:
- for hostinterface in data['Members']:
- hostinterface_uri = hostinterface['@odata.id']
- hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)
- # dictionary for capturing individual HostInterface properties
- hostinterface_data_temp = {}
- if hostinterface_response['ret'] is False:
- return hostinterface_response
- hostinterface_data = hostinterface_response['data']
- for property in properties:
- if property in hostinterface_data:
- if hostinterface_data[property] is not None:
- hostinterface_data_temp[property] = hostinterface_data[property]
- # Check for the presence of a ManagerEthernetInterface
- # object, a link to a _single_ EthernetInterface that the
- # BMC uses to communicate with the host.
- if 'ManagerEthernetInterface' in hostinterface_data:
- interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
- interface_response = self.get_nic(interface_uri)
- if interface_response['ret'] is False:
- return interface_response
- hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
-
- # Check for the presence of a HostEthernetInterfaces
- # object, a link to a _collection_ of EthernetInterfaces
- # that the host uses to communicate with the BMC.
- if 'HostEthernetInterfaces' in hostinterface_data:
- interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
- interfaces_response = self.get_request(self.root_uri + interfaces_uri)
- if interfaces_response['ret'] is False:
- return interfaces_response
- interfaces_data = interfaces_response['data']
- if 'Members' in interfaces_data:
- for interface in interfaces_data['Members']:
- interface_uri = interface['@odata.id']
- interface_response = self.get_nic(interface_uri)
- if interface_response['ret'] is False:
- return interface_response
- # Check if this is the first
- # HostEthernetInterfaces item and create empty
- # list if so.
- if 'HostEthernetInterfaces' not in hostinterface_data_temp:
- hostinterface_data_temp['HostEthernetInterfaces'] = []
-
- hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
-
- hostinterface_results.append(hostinterface_data_temp)
- else:
- continue
- result["entries"] = hostinterface_results
- if not result["entries"]:
- return {'ret': False, 'msg': "No HostInterface objects found"}
- return result
diff --git a/ansible_collections/community/general/plugins/module_utils/redis.py b/ansible_collections/community/general/plugins/module_utils/redis.py
deleted file mode 100644
index de5c8c7f..00000000
--- a/ansible_collections/community/general/plugins/module_utils/redis.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2021, Andreas Botzner
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-from ansible.module_utils.basic import missing_required_lib
-__metaclass__ = type
-
-import traceback
-
-REDIS_IMP_ERR = None
-try:
- from redis import Redis
- from redis import __version__ as redis_version
- HAS_REDIS_PACKAGE = True
-except ImportError:
- REDIS_IMP_ERR = traceback.format_exc()
- HAS_REDIS_PACKAGE = False
-
-try:
- import certifi
- HAS_CERTIFI_PACKAGE = True
-except ImportError:
- CERTIFI_IMPORT_ERROR = traceback.format_exc()
- HAS_CERTIFI_PACKAGE = False
-
-
-def fail_imports(module, needs_certifi=True):
- errors = []
- traceback = []
- if not HAS_REDIS_PACKAGE:
- errors.append(missing_required_lib('redis'))
- traceback.append(REDIS_IMP_ERR)
- if not HAS_CERTIFI_PACKAGE and needs_certifi:
- errors.append(missing_required_lib('certifi'))
- traceback.append(CERTIFI_IMPORT_ERROR)
- if errors:
- module.fail_json(errors=errors, traceback='\n'.join(traceback))
-
-
-def redis_auth_argument_spec(tls_default=True):
- return dict(
- login_host=dict(type='str',
- default='localhost',),
- login_user=dict(type='str'),
- login_password=dict(type='str',
- no_log=True
- ),
- login_port=dict(type='int', default=6379),
- tls=dict(type='bool',
- default=tls_default),
- validate_certs=dict(type='bool',
- default=True
- ),
- ca_certs=dict(type='str')
- )
-
-
-def redis_auth_params(module):
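-    # Added note (not in the original): 'validate_certs' maps to redis-py's
-    # ssl_cert_reqs value ('required' or None), and certifi's CA bundle is
-    # substituted when tls is enabled but no ca_certs path is given.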
- login_host = module.params['login_host']
- login_user = module.params['login_user']
- login_password = module.params['login_password']
- login_port = module.params['login_port']
- tls = module.params['tls']
- validate_certs = 'required' if module.params['validate_certs'] else None
- ca_certs = module.params['ca_certs']
- if tls and ca_certs is None:
- ca_certs = str(certifi.where())
- if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
- module.fail_json(
-            msg='The option `username` is only supported with redis >= 3.4.0.')
- params = {'host': login_host,
- 'port': login_port,
- 'password': login_password,
- 'ssl_ca_certs': ca_certs,
- 'ssl_cert_reqs': validate_certs,
- 'ssl': tls}
- if login_user is not None:
- params['username'] = login_user
- return params
-
-
-class RedisAnsible(object):
- '''Base class for Redis module'''
-
- def __init__(self, module):
- self.module = module
- self.connection = self._connect()
-
- def _connect(self):
- try:
- return Redis(**redis_auth_params(self.module))
- except Exception as e:
- self.module.fail_json(msg='{0}'.format(str(e)))
- return None
diff --git a/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py b/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
deleted file mode 100644
index 07092b96..00000000
--- a/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their
-# own license to the complete work.
-#
-# Copyright (C) 2017 Lenovo, Inc.
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-#
-# Contains LXCA common class
-# Lenovo xClarity Administrator (LXCA)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import traceback
-try:
- from pylxca import connect, disconnect
- HAS_PYLXCA = True
-except ImportError:
- HAS_PYLXCA = False
-
-
-PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
-
-
-def has_pylxca(module):
- """
-    Check that pylxca is installed
- :param module:
- """
- if not HAS_PYLXCA:
- module.fail_json(msg=PYLXCA_REQUIRED)
-
-
-LXCA_COMMON_ARGS = dict(
- login_user=dict(required=True),
- login_password=dict(required=True, no_log=True),
- auth_url=dict(required=True),
-)
-
-
-class connection_object:
- def __init__(self, module):
- self.module = module
-
- def __enter__(self):
- return setup_conn(self.module)
-
- def __exit__(self, type, value, traceback):
- close_conn()
-
-
-def setup_conn(module):
- """
-    Create a connection to LXCA
- :param module:
- :return: lxca connection
- """
- lxca_con = None
- try:
- lxca_con = connect(module.params['auth_url'],
- module.params['login_user'],
- module.params['login_password'],
- "True")
- except Exception as exception:
- error_msg = '; '.join(exception.args)
- module.fail_json(msg=error_msg, exception=traceback.format_exc())
- return lxca_con
-
-
-def close_conn():
- """
-    Close the connection to LXCA
- :return: None
- """
- disconnect()
diff --git a/ansible_collections/community/general/plugins/module_utils/rundeck.py b/ansible_collections/community/general/plugins/module_utils/rundeck.py
deleted file mode 100644
index afbbb481..00000000
--- a/ansible_collections/community/general/plugins/module_utils/rundeck.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Phillipe Smith
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import json
-
-from ansible.module_utils.urls import fetch_url, url_argument_spec
-from ansible.module_utils.common.text.converters import to_native
-
-
-def api_argument_spec():
- '''
- Creates an argument spec that can be used with any module
- that will be requesting content via Rundeck API
- '''
- api_argument_spec = url_argument_spec()
- api_argument_spec.update(dict(
- url=dict(required=True, type="str"),
- api_version=dict(type="int", default=39),
- api_token=dict(required=True, type="str", no_log=True)
- ))
-
- return api_argument_spec
-
-
-def api_request(module, endpoint, data=None, method="GET"):
- """Manages Rundeck API requests via HTTP(S)
-
- :arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
- :arg endpoint: The API endpoint to be used.
- :kwarg data: The data to be sent (in case of POST/PUT).
- :kwarg method: "POST", "PUT", etc.
-
- :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
-        The **info** contains the 'status' and other metadata. When an HttpError (status >= 400)
-        occurs, ``info['body']`` contains the error response data::
-
- Example::
-
- data={...}
- resp, info = fetch_url(module,
- "http://rundeck.example.org",
- data=module.jsonify(data),
- method="POST")
- status_code = info["status"]
- body = resp.read()
-        if status_code >= 400:
- body = info['body']
- """
-
- response, info = fetch_url(
- module=module,
- url="%s/api/%s/%s" % (
- module.params["url"],
- module.params["api_version"],
- endpoint
- ),
- data=json.dumps(data),
- method=method,
- headers={
- "Content-Type": "application/json",
- "Accept": "application/json",
- "X-Rundeck-Auth-Token": module.params["api_token"]
- }
- )
-
- if info["status"] == 403:
- module.fail_json(msg="Token authorization failed",
- execution_info=json.loads(info["body"]))
- if info["status"] == 409:
- module.fail_json(msg="Job executions limit reached",
- execution_info=json.loads(info["body"]))
- elif info["status"] >= 500:
- module.fail_json(msg="Rundeck API error",
- execution_info=json.loads(info["body"]))
-
- try:
- content = response.read()
- json_response = json.loads(content)
- return json_response, info
- except AttributeError as error:
- module.fail_json(msg="Rundeck API request error",
- exception=to_native(error),
- execution_info=info)
- except ValueError as error:
- module.fail_json(
- msg="No valid JSON response",
- exception=to_native(error),
- execution_info=content
- )
diff --git a/ansible_collections/community/general/plugins/module_utils/scaleway.py b/ansible_collections/community/general/plugins/module_utils/scaleway.py
deleted file mode 100644
index e6fb8109..00000000
--- a/ansible_collections/community/general/plugins/module_utils/scaleway.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-import re
-import sys
-
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-
-def scaleway_argument_spec():
- return dict(
- api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
- no_log=True, aliases=['oauth_token']),
- api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
- api_timeout=dict(type='int', default=30, aliases=['timeout']),
- query_parameters=dict(type='dict', default={}),
- validate_certs=dict(default=True, type='bool'),
- )
-
-
-def payload_from_object(scw_object):
- return dict(
- (k, v)
- for k, v in scw_object.items()
- if k != 'id' and v is not None
- )
-
-
-class ScalewayException(Exception):
-
- def __init__(self, message):
- self.message = message
-
-
-# Specify a complete Link header, for validation purposes
-R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
- (,<[^>]+>;\srel="(first|previous|next|last)")*'''
-# Specify a single relation, for iteration and string extraction purposes
-R_RELATION = r'<(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
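-# Illustration (added, hypothetical URL): a header such as
-#   <https://api.scaleway.com/servers?page=2>; rel="next"
-# is parsed by parse_pagination_link() into
-#   {'next': 'https://api.scaleway.com/servers?page=2'}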
-
-
-def parse_pagination_link(header):
- if not re.match(R_LINK_HEADER, header, re.VERBOSE):
- raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
- else:
- relations = header.split(',')
- parsed_relations = {}
- rc_relation = re.compile(R_RELATION)
- for relation in relations:
- match = rc_relation.match(relation)
- if not match:
- raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
- data = match.groupdict()
- parsed_relations[data['relation']] = data['target_IRI']
- return parsed_relations
-
-
-class Response(object):
-
- def __init__(self, resp, info):
- self.body = None
- if resp:
- self.body = resp.read()
- self.info = info
-
- @property
- def json(self):
- if not self.body:
- if "body" in self.info:
- return json.loads(self.info["body"])
- return None
- try:
- return json.loads(self.body)
- except ValueError:
- return None
-
- @property
- def status_code(self):
- return self.info["status"]
-
- @property
- def ok(self):
- return self.status_code in (200, 201, 202, 204)
-
-
-class Scaleway(object):
-
- def __init__(self, module):
- self.module = module
- self.headers = {
- 'X-Auth-Token': self.module.params.get('api_token'),
- 'User-Agent': self.get_user_agent_string(module),
- 'Content-Type': 'application/json',
- }
- self.name = None
-
- def get_resources(self):
- results = self.get('/%s' % self.name)
-
- if not results.ok:
- raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
- self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
- results.status_code, results.json['message']
- ))
-
- return results.json.get(self.name)
-
- def _url_builder(self, path, params):
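-        # Added note (not in the original): module-level 'query_parameters'
-        # are merged with per-call params (per-call values win) before being
-        # urlencoded onto the final URL.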
- d = self.module.params.get('query_parameters')
- if params is not None:
- d.update(params)
- query_string = urlencode(d, doseq=True)
-
- if path[0] == '/':
- path = path[1:]
- return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
-
- def send(self, method, path, data=None, headers=None, params=None):
- url = self._url_builder(path=path, params=params)
- self.warn(url)
-
- if headers is not None:
- self.headers.update(headers)
-
- if self.headers['Content-Type'] == "application/json":
- data = self.module.jsonify(data)
-
- resp, info = fetch_url(
- self.module, url, data=data, headers=self.headers, method=method,
- timeout=self.module.params.get('api_timeout')
- )
-
-        # Exceptions in fetch_url may result in a status -1; this ensures a proper error is surfaced to the user in all cases
- if info['status'] == -1:
- self.module.fail_json(msg=info['msg'])
-
- return Response(resp, info)
-
- @staticmethod
- def get_user_agent_string(module):
- return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])
-
- def get(self, path, data=None, headers=None, params=None):
- return self.send(method='GET', path=path, data=data, headers=headers, params=params)
-
- def put(self, path, data=None, headers=None, params=None):
- return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
-
- def post(self, path, data=None, headers=None, params=None):
- return self.send(method='POST', path=path, data=data, headers=headers, params=params)
-
- def delete(self, path, data=None, headers=None, params=None):
- return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
-
- def patch(self, path, data=None, headers=None, params=None):
- return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
-
- def update(self, path, data=None, headers=None, params=None):
- return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)
-
- def warn(self, x):
- self.module.warn(str(x))
-
-
-SCALEWAY_LOCATION = {
- 'par1': {
- 'name': 'Paris 1',
- 'country': 'FR',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
- },
-
- 'EMEA-FR-PAR1': {
- 'name': 'Paris 1',
- 'country': 'FR',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
- },
-
- 'par2': {
- 'name': 'Paris 2',
- 'country': 'FR',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
- },
-
- 'EMEA-FR-PAR2': {
- 'name': 'Paris 2',
- 'country': 'FR',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
- },
-
- 'ams1': {
- 'name': 'Amsterdam 1',
- 'country': 'NL',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
-        'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
- },
-
- 'EMEA-NL-EVS': {
- 'name': 'Amsterdam 1',
- 'country': 'NL',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
- },
-
- 'waw1': {
- 'name': 'Warsaw 1',
- 'country': 'PL',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
- },
-
- 'EMEA-PL-WAW1': {
- 'name': 'Warsaw 1',
- 'country': 'PL',
- 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
- 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
- },
-}
-
-SCALEWAY_ENDPOINT = "https://api.scaleway.com"
-
-SCALEWAY_REGIONS = [
- "fr-par",
- "nl-ams",
- "pl-waw",
-]
-
-SCALEWAY_ZONES = [
- "fr-par-1",
- "fr-par-2",
- "nl-ams-1",
- "pl-waw-1",
-]
diff --git a/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py b/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
deleted file mode 100644
index 1d584391..00000000
--- a/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.urls import fetch_url, basic_auth_header
-
-
-class BitbucketHelper:
- BITBUCKET_API_URL = 'https://api.bitbucket.org'
-
- def __init__(self, module):
- self.module = module
- self.access_token = None
-
- @staticmethod
- def bitbucket_argument_spec():
- return dict(
- client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
- client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
- # TODO:
- # - Rename user to username once current usage of username is removed
- # - Alias user to username and deprecate it
- user=dict(type='str', fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
- password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
- )
-
- @staticmethod
- def bitbucket_required_one_of():
- return [['client_id', 'client_secret', 'user', 'password']]
-
- @staticmethod
- def bitbucket_required_together():
- return [['client_id', 'client_secret'], ['user', 'password']]
-
- def fetch_access_token(self):
- if self.module.params['client_id'] and self.module.params['client_secret']:
- headers = {
- 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
- }
-
- info, content = self.request(
- api_url='https://bitbucket.org/site/oauth2/access_token',
- method='POST',
- data='grant_type=client_credentials',
- headers=headers,
- )
-
- if info['status'] == 200:
- self.access_token = content['access_token']
- else:
- self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
-
- def request(self, api_url, method, data=None, headers=None):
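-        # Added note (not in the original): a bearer token obtained via
-        # fetch_access_token() takes precedence; user/password basic auth is
-        # applied only when no access token is set.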
- headers = headers or {}
-
- if self.access_token:
- headers.update({
- 'Authorization': 'Bearer {0}'.format(self.access_token),
- })
- elif self.module.params['user'] and self.module.params['password']:
- headers.update({
- 'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
- })
-
- if isinstance(data, dict):
- data = self.module.jsonify(data)
- headers.update({
- 'Content-type': 'application/json',
- })
-
- response, info = fetch_url(
- module=self.module,
- url=api_url,
- method=method,
- headers=headers,
- data=data,
- force=True,
- )
-
- content = {}
-
- if response is not None:
- body = to_text(response.read())
- if body:
- content = json.loads(body)
-
- return info, content
diff --git a/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py b/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
deleted file mode 100644
index 59225126..00000000
--- a/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# (c) 2018 Luca 'remix_tj' Lorenzetto
-#
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-emc_vnx_argument_spec = {
- 'sp_address': dict(type='str', required=True),
- 'sp_user': dict(type='str', required=False, default='sysadmin'),
- 'sp_password': dict(type='str', required=False, default='sysadmin',
- no_log=True),
-}
diff --git a/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py b/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
deleted file mode 100644
index b7734444..00000000
--- a/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-from ansible.module_utils import basic
-
-
-def convert_to_binary_multiple(size_with_unit):
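-    # Illustration (added): '1 GiB' -> 1024 and '2 TiB' -> 2097152 (both MiB);
-    # None or a negative size returns -1, an unrecognized unit raises ValueError.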
- if size_with_unit is None:
- return -1
- valid_units = ['MiB', 'GiB', 'TiB']
- valid_unit = False
- for unit in valid_units:
- if size_with_unit.strip().endswith(unit):
- valid_unit = True
- size = size_with_unit.split(unit)[0]
- if float(size) < 0:
- return -1
- if not valid_unit:
- raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))
-
- size = size_with_unit.replace(" ", "").split('iB')[0]
-    size_bytes = basic.human_to_bytes(size)
-    return int(size_bytes / (1024 * 1024))
-
-
-storage_system_spec = {
- "storage_system_ip": {
- "required": True,
- "type": "str"
- },
- "storage_system_username": {
- "required": True,
- "type": "str",
- "no_log": True
- },
- "storage_system_password": {
- "required": True,
- "type": "str",
- "no_log": True
- },
- "secure": {
- "type": "bool",
- "default": False
- }
-}
-
-
-def cpg_argument_spec():
- spec = {
- "state": {
- "required": True,
- "choices": ['present', 'absent'],
- "type": 'str'
- },
- "cpg_name": {
- "required": True,
- "type": "str"
- },
- "domain": {
- "type": "str"
- },
- "growth_increment": {
- "type": "str",
- },
- "growth_limit": {
- "type": "str",
- },
- "growth_warning": {
- "type": "str",
- },
- "raid_type": {
- "required": False,
- "type": "str",
- "choices": ['R0', 'R1', 'R5', 'R6']
- },
- "set_size": {
- "required": False,
- "type": "int"
- },
- "high_availability": {
- "type": "str",
- "choices": ['PORT', 'CAGE', 'MAG']
- },
- "disk_type": {
- "type": "str",
- "choices": ['FC', 'NL', 'SSD']
- }
- }
- spec.update(storage_system_spec)
- return spec
diff --git a/ansible_collections/community/general/plugins/module_utils/version.py b/ansible_collections/community/general/plugins/module_utils/version.py
deleted file mode 100644
index a236a34d..00000000
--- a/ansible_collections/community/general/plugins/module_utils/version.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Felix Fontein
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-"""Provide version object to compare version numbers."""
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can
-# remove the _version.py file, and replace the following import by
-#
-# from ansible.module_utils.compat.version import LooseVersion
-
-from ._version import LooseVersion
diff --git a/ansible_collections/community/general/plugins/module_utils/vexata.py b/ansible_collections/community/general/plugins/module_utils/vexata.py
deleted file mode 100644
index 3d6fb7aa..00000000
--- a/ansible_collections/community/general/plugins/module_utils/vexata.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2019, Sandeep Kasargod
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-HAS_VEXATAPI = True
-try:
- from vexatapi.vexata_api_proxy import VexataAPIProxy
-except ImportError:
- HAS_VEXATAPI = False
-
-from ansible.module_utils.common.text.converters import to_native
-from ansible.module_utils.basic import env_fallback
-
-VXOS_VERSION = None
-
-
-def get_version(iocs_json):
- if not iocs_json:
- raise Exception('Invalid IOC json')
-    active = [x for x in iocs_json if x['mgmtRole']]  # a list (not a filter object) so it can be truth-tested and indexed on Python 3
- if not active:
- raise Exception('Unable to detect active IOC')
- active = active[0]
- ver = active['swVersion']
- if ver[0] != 'v':
- raise Exception('Illegal version string')
- ver = ver[1:ver.find('-')]
- ver = map(int, ver.split('.'))
- return tuple(ver)
-
-
-def get_array(module):
- """Return storage array object or fail"""
- global VXOS_VERSION
- array = module.params['array']
- user = module.params.get('user', None)
- password = module.params.get('password', None)
- validate = module.params.get('validate_certs')
-
- if not HAS_VEXATAPI:
- module.fail_json(msg='vexatapi library is required for this module. '
- 'To install, use `pip install vexatapi`')
-
- if user and password:
- system = VexataAPIProxy(array, user, password, verify_cert=validate)
- else:
- module.fail_json(msg='The user/password are required to be passed in to '
- 'the module as arguments or by setting the '
- 'VEXATA_USER and VEXATA_PASSWORD environment variables.')
- try:
- if system.test_connection():
- VXOS_VERSION = get_version(system.iocs())
- return system
- else:
- module.fail_json(msg='Test connection to array failed.')
- except Exception as e:
- module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
-
-
-def argument_spec():
- """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
- return dict(
- array=dict(type='str',
- required=True),
- user=dict(type='str',
- fallback=(env_fallback, ['VEXATA_USER'])),
- password=dict(type='str',
- no_log=True,
- fallback=(env_fallback, ['VEXATA_PASSWORD'])),
- validate_certs=dict(type='bool',
- required=False,
- default=False),
- )
-
-
-def required_together():
- """Return the default list used for the required_together argument to AnsibleModule"""
- return [['user', 'password']]
-
-
-def size_to_MiB(size):
- """Convert a '[MGT]' string to MiB, return -1 on error."""
- quant = size[:-1]
- exponent = size[-1]
- if not quant.isdigit() or exponent not in 'MGT':
- return -1
- quant = int(quant)
- if exponent == 'G':
- quant <<= 10
- elif exponent == 'T':
- quant <<= 20
- return quant
diff --git a/ansible_collections/community/general/plugins/module_utils/xenserver.py b/ansible_collections/community/general/plugins/module_utils/xenserver.py
deleted file mode 100644
index 015b1021..00000000
--- a/ansible_collections/community/general/plugins/module_utils/xenserver.py
+++ /dev/null
@@ -1,861 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2018, Bojan Vitnik
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import atexit
-import time
-import re
-import traceback
-
-XENAPI_IMP_ERR = None
-try:
- import XenAPI
- HAS_XENAPI = True
-except ImportError:
- HAS_XENAPI = False
- XENAPI_IMP_ERR = traceback.format_exc()
-
-from ansible.module_utils.basic import env_fallback, missing_required_lib
-from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
-
-
-def xenserver_common_argument_spec():
- return dict(
- hostname=dict(type='str',
- aliases=['host', 'pool'],
- required=False,
- default='localhost',
- fallback=(env_fallback, ['XENSERVER_HOST']),
- ),
- username=dict(type='str',
- aliases=['user', 'admin'],
- required=False,
- default='root',
- fallback=(env_fallback, ['XENSERVER_USER'])),
- password=dict(type='str',
- aliases=['pass', 'pwd'],
- required=False,
- no_log=True,
- fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
- validate_certs=dict(type='bool',
- required=False,
- default=True,
- fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
- )
-
-
-def xapi_to_module_vm_power_state(power_state):
- """Maps XAPI VM power states to module VM power states."""
- module_power_state_map = {
- "running": "poweredon",
- "halted": "poweredoff",
- "suspended": "suspended",
- "paused": "paused"
- }
-
- return module_power_state_map.get(power_state)
-
-
-def module_to_xapi_vm_power_state(power_state):
- """Maps module VM power states to XAPI VM power states."""
- vm_power_state_map = {
- "poweredon": "running",
- "poweredoff": "halted",
- "restarted": "running",
- "suspended": "suspended",
- "shutdownguest": "halted",
- "rebootguest": "running",
- }
-
- return vm_power_state_map.get(power_state)
-
-
-def is_valid_ip_addr(ip_addr):
-    """Validates given string as IPv4 address.
-
- Args:
- ip_addr (str): string to validate as IPv4 address.
-
- Returns:
- bool: True if string is valid IPv4 address, else False.
- """
- ip_addr_split = ip_addr.split('.')
-
- if len(ip_addr_split) != 4:
- return False
-
- for ip_addr_octet in ip_addr_split:
- if not ip_addr_octet.isdigit():
- return False
-
- ip_addr_octet_int = int(ip_addr_octet)
-
- if ip_addr_octet_int < 0 or ip_addr_octet_int > 255:
- return False
-
- return True
-
-
-def is_valid_ip_netmask(ip_netmask):
- """Validates given string as IPv4 netmask.
-
- Args:
- ip_netmask (str): string to validate as IPv4 netmask.
-
- Returns:
- bool: True if string is valid IPv4 netmask, else False.
- """
- ip_netmask_split = ip_netmask.split('.')
-
- if len(ip_netmask_split) != 4:
- return False
-
- valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
-
- for ip_netmask_octet in ip_netmask_split:
- if ip_netmask_octet not in valid_octet_values:
- return False
-
- if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
- return False
- elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
- return False
- elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
- return False
-
- return True
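
The chained checks in the removed is_valid_ip_netmask above are a compact contiguity test: once an octet is below 255, every later octet must be 0, so the set bits of the mask can never resume after a gap. A few illustrative cases (assuming the helper above is in scope):

    assert is_valid_ip_netmask('255.255.255.0') is True
    assert is_valid_ip_netmask('255.255.240.0') is True   # /20, contiguous
    assert is_valid_ip_netmask('255.0.255.0') is False    # gap in the mask
    assert is_valid_ip_netmask('255.255.255.3') is False  # 3 is not a valid octet
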
-
-
-def is_valid_ip_prefix(ip_prefix):
- """Validates given string as IPv4 prefix.
-
- Args:
- ip_prefix (str): string to validate as IPv4 prefix.
-
- Returns:
- bool: True if string is valid IPv4 prefix, else False.
- """
- if not ip_prefix.isdigit():
- return False
-
- ip_prefix_int = int(ip_prefix)
-
- if ip_prefix_int < 0 or ip_prefix_int > 32:
- return False
-
- return True
-
-
-def ip_prefix_to_netmask(ip_prefix, skip_check=False):
- """Converts IPv4 prefix to netmask.
-
- Args:
- ip_prefix (str): IPv4 prefix to convert.
- skip_check (bool): Skip validation of IPv4 prefix
- (default: False). Use if you are sure IPv4 prefix is valid.
-
- Returns:
- str: IPv4 netmask equivalent to given IPv4 prefix if
- IPv4 prefix is valid, else an empty string.
- """
- if skip_check:
- ip_prefix_valid = True
- else:
- ip_prefix_valid = is_valid_ip_prefix(ip_prefix)
-
- if ip_prefix_valid:
- return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])
- else:
- return ""
-
-
-def ip_netmask_to_prefix(ip_netmask, skip_check=False):
- """Converts IPv4 netmask to prefix.
-
- Args:
- ip_netmask (str): IPv4 netmask to convert.
- skip_check (bool): Skip validation of IPv4 netmask
- (default: False). Use if you are sure IPv4 netmask is valid.
-
- Returns:
- str: IPv4 prefix equivalent to given IPv4 netmask if
- IPv4 netmask is valid, else an empty string.
- """
- if skip_check:
- ip_netmask_valid = True
- else:
- ip_netmask_valid = is_valid_ip_netmask(ip_netmask)
-
- if ip_netmask_valid:
- return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")]))
- else:
- return ""
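
The two removed converters above are inverses. ip_prefix_to_netmask builds the 32-bit mask by shifting 0xffffffff left by (32 - prefix), then slices out each octet with a right shift and & 0xff; ip_netmask_to_prefix just counts set bits across the four octets. A self-contained sketch of the same arithmetic (function names are illustrative):

    def prefix_to_netmask(prefix):
        """E.g. 24 -> '255.255.255.0' (mirrors ip_prefix_to_netmask above)."""
        mask = (0xffffffff << (32 - prefix)) & 0xffffffff
        return '.'.join(str((mask >> shift) & 0xff) for shift in (24, 16, 8, 0))

    def netmask_to_prefix(netmask):
        """E.g. '255.255.255.0' -> 24 (mirrors ip_netmask_to_prefix above)."""
        return sum(bin(int(octet)).count('1') for octet in netmask.split('.'))

    assert prefix_to_netmask(24) == '255.255.255.0'
    assert prefix_to_netmask(20) == '255.255.240.0'
    assert netmask_to_prefix('255.255.240.0') == 20
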
-
-
-def is_valid_ip6_addr(ip6_addr):
- """Validates given string as IPv6 address.
-
- Args:
- ip6_addr (str): string to validate as IPv6 address.
-
- Returns:
- bool: True if string is valid IPv6 address, else False.
- """
- ip6_addr = ip6_addr.lower()
- ip6_addr_split = ip6_addr.split(':')
-
- if ip6_addr_split[0] == "":
- ip6_addr_split.pop(0)
-
- if ip6_addr_split[-1] == "":
- ip6_addr_split.pop(-1)
-
- if len(ip6_addr_split) > 8:
- return False
-
- if ip6_addr_split.count("") > 1:
- return False
- elif ip6_addr_split.count("") == 1:
- ip6_addr_split.remove("")
- else:
- if len(ip6_addr_split) != 8:
- return False
-
- ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')
-
- for ip6_addr_hextet in ip6_addr_split:
- if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
- return False
-
- return True
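
As a cross-check: on Python 3 the same validation can be done with the standard library's ipaddress module; the hand-rolled hextet parser above avoids that import, likely for Python 2 compatibility. A sketch under that assumption, not the module's code:

    import ipaddress

    def is_valid_ip6(addr):
        # Let the stdlib parser decide instead of splitting hextets by hand.
        try:
            ipaddress.IPv6Address(addr)
            return True
        except ValueError:
            return False

    assert is_valid_ip6('fe80::1') is True
    assert is_valid_ip6('fe80::1::2') is False  # only one '::' run is allowed
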
-
-
-def is_valid_ip6_prefix(ip6_prefix):
- """Validates given string as IPv6 prefix.
-
- Args:
- ip6_prefix (str): string to validate as IPv6 prefix.
-
- Returns:
- bool: True if string is valid IPv6 prefix, else False.
- """
- if not ip6_prefix.isdigit():
- return False
-
- ip6_prefix_int = int(ip6_prefix)
-
- if ip6_prefix_int < 0 or ip6_prefix_int > 128:
- return False
-
- return True
-
-
-def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
-    """Finds and returns a reference to an arbitrary XAPI object.
-
-    An object is searched for by using either name (name_label) or UUID,
-    with UUID taking precedence over name.
-
- Args:
- module: Reference to Ansible module object.
- name (str): Name (name_label) of an object to search for.
- uuid (str): UUID of an object to search for.
- obj_type (str): Any valid XAPI object type. See XAPI docs.
- fail (bool): Should function fail with error message if object
- is not found or exit silently (default: True). The function
- always fails if multiple objects with same name are found.
- msg_prefix (str): A string error messages should be prefixed
- with (default: "").
-
- Returns:
- XAPI reference to found object or None if object is not found
- and fail=False.
- """
- xapi_session = XAPI.connect(module)
-
- if obj_type in ["template", "snapshot"]:
- real_obj_type = "VM"
- elif obj_type == "home server":
- real_obj_type = "host"
- elif obj_type == "ISO image":
- real_obj_type = "VDI"
- else:
- real_obj_type = obj_type
-
- obj_ref = None
-
- # UUID has precedence over name.
- if uuid:
- try:
- # Find object by UUID. If no object is found using given UUID,
- # an exception will be generated.
- obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
- except XenAPI.Failure as f:
- if fail:
- module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
- elif name:
- try:
- # Find object by name (name_label).
- obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
- except XenAPI.Failure as f:
- module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- # If obj_ref_list is empty.
- if not obj_ref_list:
- if fail:
- module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
- # If obj_ref_list contains multiple object references.
- elif len(obj_ref_list) > 1:
- module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
- # The obj_ref_list contains only one object reference.
- else:
- obj_ref = obj_ref_list[0]
- else:
- module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))
-
- return obj_ref
-
-
-def gather_vm_params(module, vm_ref):
- """Gathers all VM parameters available in XAPI database.
-
- Args:
- module: Reference to Ansible module object.
- vm_ref (str): XAPI reference to VM.
-
- Returns:
- dict: VM parameters.
- """
- # We silently return empty vm_params if bad vm_ref was supplied.
- if not vm_ref or vm_ref == "OpaqueRef:NULL":
- return {}
-
- xapi_session = XAPI.connect(module)
-
- try:
- vm_params = xapi_session.xenapi.VM.get_record(vm_ref)
-
- # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.
-
- # Affinity.
- if vm_params['affinity'] != "OpaqueRef:NULL":
- vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
- vm_params['affinity'] = vm_affinity
- else:
- vm_params['affinity'] = {}
-
- # VBDs.
- vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]
-
- # List of VBDs is usually sorted by userdevice but we sort just
- # in case. We need this list sorted by userdevice so that we can
- # make positional pairing with module.params['disks'].
- vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
- vm_params['VBDs'] = vm_vbd_params_list
-
- # VDIs.
- for vm_vbd_params in vm_params['VBDs']:
- if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
- vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
- else:
- vm_vdi_params = {}
-
- vm_vbd_params['VDI'] = vm_vdi_params
-
- # VIFs.
- vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]
-
- # List of VIFs is usually sorted by device but we sort just
- # in case. We need this list sorted by device so that we can
- # make positional pairing with module.params['networks'].
- vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
- vm_params['VIFs'] = vm_vif_params_list
-
- # Networks.
- for vm_vif_params in vm_params['VIFs']:
- if vm_vif_params['network'] != "OpaqueRef:NULL":
- vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
- else:
- vm_network_params = {}
-
- vm_vif_params['network'] = vm_network_params
-
- # Guest metrics.
- if vm_params['guest_metrics'] != "OpaqueRef:NULL":
- vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
- vm_params['guest_metrics'] = vm_guest_metrics
- else:
- vm_params['guest_metrics'] = {}
-
- # Detect customization agent.
- xenserver_version = get_xenserver_version(module)
-
- if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
- "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
- vm_params['customization_agent'] = "native"
- else:
- vm_params['customization_agent'] = "custom"
-
- except XenAPI.Failure as f:
- module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- return vm_params
-
-
-def gather_vm_facts(module, vm_params):
- """Gathers VM facts.
-
- Args:
- module: Reference to Ansible module object.
- vm_params (dict): A dictionary with VM parameters as returned
- by gather_vm_params() function.
-
- Returns:
- dict: VM facts.
- """
- # We silently return empty vm_facts if no vm_params are available.
- if not vm_params:
- return {}
-
- xapi_session = XAPI.connect(module)
-
- # Gather facts.
- vm_facts = {
- "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
- "name": vm_params['name_label'],
- "name_desc": vm_params['name_description'],
- "uuid": vm_params['uuid'],
- "is_template": vm_params['is_a_template'],
- "folder": vm_params['other_config'].get('folder', ''),
- "hardware": {
- "num_cpus": int(vm_params['VCPUs_max']),
- "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
- "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
- },
- "disks": [],
- "cdrom": {},
- "networks": [],
- "home_server": vm_params['affinity'].get('name_label', ''),
- "domid": vm_params['domid'],
- "platform": vm_params['platform'],
- "other_config": vm_params['other_config'],
- "xenstore_data": vm_params['xenstore_data'],
- "customization_agent": vm_params['customization_agent'],
- }
-
- for vm_vbd_params in vm_params['VBDs']:
- if vm_vbd_params['type'] == "Disk":
- vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])
-
- vm_disk_params = {
- "size": int(vm_vbd_params['VDI']['virtual_size']),
- "name": vm_vbd_params['VDI']['name_label'],
- "name_desc": vm_vbd_params['VDI']['name_description'],
- "sr": vm_disk_sr_params['name_label'],
- "sr_uuid": vm_disk_sr_params['uuid'],
- "os_device": vm_vbd_params['device'],
- "vbd_userdevice": vm_vbd_params['userdevice'],
- }
-
- vm_facts['disks'].append(vm_disk_params)
- elif vm_vbd_params['type'] == "CD":
- if vm_vbd_params['empty']:
- vm_facts['cdrom'].update(type="none")
- else:
- vm_facts['cdrom'].update(type="iso")
- vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])
-
- for vm_vif_params in vm_params['VIFs']:
- vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})
-
- vm_network_params = {
- "name": vm_vif_params['network']['name_label'],
- "mac": vm_vif_params['MAC'],
- "vif_device": vm_vif_params['device'],
- "mtu": vm_vif_params['MTU'],
- "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
- "prefix": "",
- "netmask": "",
- "gateway": "",
- "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
- vm_vif_params['device'])],
- "prefix6": "",
- "gateway6": "",
- }
-
- if vm_params['customization_agent'] == "native":
- if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
- vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
- vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])
-
- vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']
-
- if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
- vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]
-
- vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
-
- elif vm_params['customization_agent'] == "custom":
- vm_xenstore_data = vm_params['xenstore_data']
-
- for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
- vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")
-
- vm_facts['networks'].append(vm_network_params)
-
- return vm_facts
-
-
-def set_vm_power_state(module, vm_ref, power_state, timeout=300):
- """Controls VM power state.
-
- Args:
- module: Reference to Ansible module object.
- vm_ref (str): XAPI reference to VM.
- power_state (str): Power state to put VM into. Accepted values:
-
- - poweredon
- - poweredoff
- - restarted
- - suspended
- - shutdownguest
- - rebootguest
-
- timeout (int): timeout in seconds (default: 300).
-
- Returns:
- tuple (bool, str): Bool element is True if VM power state has
- changed by calling this function, else False. Str element carries
- a value of resulting power state as defined by XAPI - 'running',
- 'halted' or 'suspended'.
- """
- # Fail if we don't have a valid VM reference.
- if not vm_ref or vm_ref == "OpaqueRef:NULL":
- module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")
-
- xapi_session = XAPI.connect(module)
-
- power_state = power_state.replace('_', '').replace('-', '').lower()
- vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)
-
- state_changed = False
-
- try:
- # Get current state of the VM.
- vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
-
- if vm_power_state_current != power_state:
- if power_state == "poweredon":
- if not module.check_mode:
- # VM can be in either halted, suspended, paused or running state.
- # For VM to be in running state, start has to be called on halted,
- # resume on suspended and unpause on paused VM.
- if vm_power_state_current == "poweredoff":
- xapi_session.xenapi.VM.start(vm_ref, False, False)
- elif vm_power_state_current == "suspended":
- xapi_session.xenapi.VM.resume(vm_ref, False, False)
- elif vm_power_state_current == "paused":
- xapi_session.xenapi.VM.unpause(vm_ref)
- elif power_state == "poweredoff":
- if not module.check_mode:
- # hard_shutdown will halt VM regardless of current state.
- xapi_session.xenapi.VM.hard_shutdown(vm_ref)
- elif power_state == "restarted":
- # hard_reboot will restart VM only if VM is in paused or running state.
- if vm_power_state_current in ["paused", "poweredon"]:
- if not module.check_mode:
- xapi_session.xenapi.VM.hard_reboot(vm_ref)
- else:
- module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
- elif power_state == "suspended":
- # running state is required for suspend.
- if vm_power_state_current == "poweredon":
- if not module.check_mode:
- xapi_session.xenapi.VM.suspend(vm_ref)
- else:
- module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
- elif power_state == "shutdownguest":
- # running state is required for guest shutdown.
- if vm_power_state_current == "poweredon":
- if not module.check_mode:
- if timeout == 0:
- xapi_session.xenapi.VM.clean_shutdown(vm_ref)
- else:
- task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
- task_result = wait_for_task(module, task_ref, timeout)
-
- if task_result:
- module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
- else:
- module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
- elif power_state == "rebootguest":
- # running state is required for guest reboot.
- if vm_power_state_current == "poweredon":
- if not module.check_mode:
- if timeout == 0:
- xapi_session.xenapi.VM.clean_reboot(vm_ref)
- else:
- task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
- task_result = wait_for_task(module, task_ref, timeout)
-
- if task_result:
- module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
- else:
- module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
- else:
- module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)
-
- state_changed = True
- except XenAPI.Failure as f:
- module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- return (state_changed, vm_power_state_resulting)
-
-
-def wait_for_task(module, task_ref, timeout=300):
- """Waits for async XAPI task to finish.
-
- Args:
- module: Reference to Ansible module object.
- task_ref (str): XAPI reference to task.
- timeout (int): timeout in seconds (default: 300).
-
- Returns:
- str: failure message on failure, else an empty string.
- """
- # Fail if we don't have a valid task reference.
- if not task_ref or task_ref == "OpaqueRef:NULL":
- module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")
-
- xapi_session = XAPI.connect(module)
-
- interval = 2
-
- result = ""
-
- # If we have to wait indefinitely, make time_left larger than 0 so we can
- # enter while loop.
- if timeout == 0:
- time_left = 1
- else:
- time_left = timeout
-
- try:
- while time_left > 0:
- task_status = xapi_session.xenapi.task.get_status(task_ref).lower()
-
- if task_status == "pending":
- # Task is still running.
- time.sleep(interval)
-
- # We decrease time_left only if we don't wait indefinitely.
- if timeout != 0:
- time_left -= interval
-
- continue
- elif task_status == "success":
- # Task is done.
- break
- else:
- # Task failed.
- result = task_status
- break
- else:
- # We timed out.
- result = "timeout"
-
- xapi_session.xenapi.task.destroy(task_ref)
- except XenAPI.Failure as f:
- module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- return result
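
Note the while/else in the removed wait_for_task above: the else clause of a Python loop runs only when the loop condition becomes false without a break, which is exactly the timeout path here; success and failure both break out and skip it. A minimal demonstration of the construct (names are illustrative):

    import time

    def poll(check, timeout, interval=2):
        """Poll check() until it returns truthy; 'timeout' on expiry."""
        time_left = timeout
        while time_left > 0:
            if check():
                break          # success skips the else branch
            time.sleep(interval)
            time_left -= interval
        else:
            return 'timeout'   # reached only if the loop ran out of time
        return 'done'

    assert poll(lambda: True, timeout=4, interval=1) == 'done'
    assert poll(lambda: False, timeout=2, interval=1) == 'timeout'
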
-
-
-def wait_for_vm_ip_address(module, vm_ref, timeout=300):
- """Waits for VM to acquire an IP address.
-
- Args:
- module: Reference to Ansible module object.
- vm_ref (str): XAPI reference to VM.
- timeout (int): timeout in seconds (default: 300).
-
- Returns:
- dict: VM guest metrics as retrieved by
- VM_guest_metrics.get_record() XAPI method with info
- on IP address acquired.
- """
- # Fail if we don't have a valid VM reference.
- if not vm_ref or vm_ref == "OpaqueRef:NULL":
- module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")
-
- xapi_session = XAPI.connect(module)
-
- vm_guest_metrics = {}
-
- try:
- # We translate VM power state string so that error message can be
- # consistent with module VM power states.
- vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
-
- if vm_power_state != 'poweredon':
- module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)
-
- interval = 2
-
- # If we have to wait indefinitely, make time_left larger than 0 so we can
- # enter while loop.
- if timeout == 0:
- time_left = 1
- else:
- time_left = timeout
-
- while time_left > 0:
- vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)
-
- if vm_guest_metrics_ref != "OpaqueRef:NULL":
- vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
- vm_ips = vm_guest_metrics['networks']
-
- if "0/ip" in vm_ips:
- break
-
- time.sleep(interval)
-
- # We decrease time_left only if we don't wait indefinitely.
- if timeout != 0:
- time_left -= interval
- else:
- # We timed out.
- module.fail_json(msg="Timed out waiting for VM IP address!")
-
- except XenAPI.Failure as f:
- module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- return vm_guest_metrics
-
-
-def get_xenserver_version(module):
- """Returns XenServer version.
-
- Args:
- module: Reference to Ansible module object.
-
- Returns:
- list: Element [0] is major version. Element [1] is minor version.
- Element [2] is update number.
- """
- xapi_session = XAPI.connect(module)
-
- host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
-
- try:
- xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')]
- except ValueError:
- xenserver_version = [0, 0, 0]
-
- return xenserver_version
-
-
-class XAPI(object):
- """Class for XAPI session management."""
- _xapi_session = None
-
- @classmethod
- def connect(cls, module, disconnect_atexit=True):
- """Establishes XAPI connection and returns session reference.
-
-        If no existing session is available, establishes a new one
-        and returns it, else returns the existing one.
-
- Args:
- module: Reference to Ansible module object.
- disconnect_atexit (bool): Controls if method should
- register atexit handler to disconnect from XenServer
- on module exit (default: True).
-
- Returns:
- XAPI session reference.
- """
- if cls._xapi_session is not None:
- return cls._xapi_session
-
- hostname = module.params['hostname']
- username = module.params['username']
- password = module.params['password']
- ignore_ssl = not module.params['validate_certs']
-
- if hostname == 'localhost':
- cls._xapi_session = XenAPI.xapi_local()
- username = ''
- password = ''
- else:
- # If scheme is not specified we default to http:// because https://
- # is problematic in most setups.
- if not hostname.startswith("http://") and not hostname.startswith("https://"):
- hostname = "http://%s" % hostname
-
- try:
- # ignore_ssl is supported in XenAPI library from XenServer 7.2
- # SDK onward but there is no way to tell which version we
- # are using. TypeError will be raised if ignore_ssl is not
- # supported. Additionally, ignore_ssl requires Python 2.7.9
- # or newer.
- cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
- except TypeError:
- # Try without ignore_ssl.
- cls._xapi_session = XenAPI.Session(hostname)
-
- if not password:
- password = ''
-
- try:
- cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
- except XenAPI.Failure as f:
- module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))
-
- # Disabling atexit should be used in special cases only.
- if disconnect_atexit:
- atexit.register(cls._xapi_session.logout)
-
- return cls._xapi_session
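
The removed connect() above is effectively a memoized singleton: the session is cached in the class attribute _xapi_session, so every helper in this file that calls XAPI.connect(module) shares one login, and atexit.register ensures a single logout at interpreter shutdown. The same pattern, stripped of the XenAPI specifics (a sketch, not the collection's code):

    import atexit

    class CachedSession(object):
        _session = None

        @classmethod
        def connect(cls, disconnect_atexit=True):
            if cls._session is not None:
                return cls._session       # reuse the cached session
            cls._session = object()       # stand-in for a real login
            if disconnect_atexit:
                # stand-in for session.logout; registered exactly once
                atexit.register(lambda: None)
            return cls._session

    assert CachedSession.connect() is CachedSession.connect()
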
-
-
-class XenServerObject(object):
- """Base class for all XenServer objects.
-
- This class contains active XAPI session reference and common
- attributes with useful info about XenServer host/pool.
-
- Attributes:
- module: Reference to Ansible module object.
- xapi_session: Reference to XAPI session.
- pool_ref (str): XAPI reference to a pool currently connected to.
- default_sr_ref (str): XAPI reference to a pool default
- Storage Repository.
-        host_ref (str): XAPI reference to a host currently connected to.
-        xenserver_version (list of int): Contains XenServer major,
-            minor and update version.
- """
-
- def __init__(self, module):
- """Inits XenServerObject using common module parameters.
-
- Args:
- module: Reference to Ansible module object.
- """
- if not HAS_XENAPI:
- module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)
-
- self.module = module
- self.xapi_session = XAPI.connect(module)
-
- try:
- self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
- self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
- self.xenserver_version = get_xenserver_version(module)
- except XenAPI.Failure as f:
- self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
diff --git a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
deleted file mode 120000
index 4d351842..00000000
--- a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
+++ /dev/null
@@ -1 +0,0 @@
-database/aerospike/aerospike_migrations.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
deleted file mode 120000
index 1d2a42b7..00000000
--- a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
+++ /dev/null
@@ -1 +0,0 @@
-monitoring/airbrake_deployment.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/aix_devices.py b/ansible_collections/community/general/plugins/modules/aix_devices.py
deleted file mode 120000
index 091e7d39..00000000
--- a/ansible_collections/community/general/plugins/modules/aix_devices.py
+++ /dev/null
@@ -1 +0,0 @@
-system/aix_devices.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
deleted file mode 120000
index 4e4076c8..00000000
--- a/ansible_collections/community/general/plugins/modules/aix_filesystem.py
+++ /dev/null
@@ -1 +0,0 @@
-system/aix_filesystem.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/aix_inittab.py b/ansible_collections/community/general/plugins/modules/aix_inittab.py
deleted file mode 120000
index 9ce4fa68..00000000
--- a/ansible_collections/community/general/plugins/modules/aix_inittab.py
+++ /dev/null
@@ -1 +0,0 @@
-system/aix_inittab.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvg.py b/ansible_collections/community/general/plugins/modules/aix_lvg.py
deleted file mode 120000
index 92d71021..00000000
--- a/ansible_collections/community/general/plugins/modules/aix_lvg.py
+++ /dev/null
@@ -1 +0,0 @@
-system/aix_lvg.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py
deleted file mode 120000
index 5ebb4034..00000000
--- a/ansible_collections/community/general/plugins/modules/aix_lvol.py
+++ /dev/null
@@ -1 +0,0 @@
-system/aix_lvol.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance.py b/ansible_collections/community/general/plugins/modules/ali_instance.py
deleted file mode 120000
index 829f0d03..00000000
--- a/ansible_collections/community/general/plugins/modules/ali_instance.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/alicloud/ali_instance.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/ali_instance_info.py
deleted file mode 120000
index c0e57afd..00000000
--- a/ansible_collections/community/general/plugins/modules/ali_instance_info.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/alicloud/ali_instance_info.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/alternatives.py b/ansible_collections/community/general/plugins/modules/alternatives.py
deleted file mode 120000
index fdfc887b..00000000
--- a/ansible_collections/community/general/plugins/modules/alternatives.py
+++ /dev/null
@@ -1 +0,0 @@
-system/alternatives.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
deleted file mode 120000
index 369d39db..00000000
--- a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/ansible_galaxy_install.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
deleted file mode 120000
index 212a1197..00000000
--- a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
+++ /dev/null
@@ -1 +0,0 @@
-web_infrastructure/apache2_mod_proxy.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/apache2_module.py b/ansible_collections/community/general/plugins/modules/apache2_module.py
deleted file mode 120000
index a4d07a8c..00000000
--- a/ansible_collections/community/general/plugins/modules/apache2_module.py
+++ /dev/null
@@ -1 +0,0 @@
-web_infrastructure/apache2_module.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/apk.py b/ansible_collections/community/general/plugins/modules/apk.py
deleted file mode 120000
index 3496ad6f..00000000
--- a/ansible_collections/community/general/plugins/modules/apk.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/os/apk.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/apt_repo.py b/ansible_collections/community/general/plugins/modules/apt_repo.py
deleted file mode 120000
index df3dbae2..00000000
--- a/ansible_collections/community/general/plugins/modules/apt_repo.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/os/apt_repo.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py
deleted file mode 120000
index c8b0a2e6..00000000
--- a/ansible_collections/community/general/plugins/modules/apt_rpm.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/os/apt_rpm.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/archive.py b/ansible_collections/community/general/plugins/modules/archive.py
deleted file mode 120000
index 8e133de5..00000000
--- a/ansible_collections/community/general/plugins/modules/archive.py
+++ /dev/null
@@ -1 +0,0 @@
-files/archive.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/atomic_container.py b/ansible_collections/community/general/plugins/modules/atomic_container.py
deleted file mode 120000
index d6afefb3..00000000
--- a/ansible_collections/community/general/plugins/modules/atomic_container.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/atomic/atomic_container.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/atomic_host.py b/ansible_collections/community/general/plugins/modules/atomic_host.py
deleted file mode 120000
index 407f9b9a..00000000
--- a/ansible_collections/community/general/plugins/modules/atomic_host.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/atomic/atomic_host.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/atomic_image.py b/ansible_collections/community/general/plugins/modules/atomic_image.py
deleted file mode 120000
index ca8f119e..00000000
--- a/ansible_collections/community/general/plugins/modules/atomic_image.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/atomic/atomic_image.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/awall.py b/ansible_collections/community/general/plugins/modules/awall.py
deleted file mode 120000
index ca397959..00000000
--- a/ansible_collections/community/general/plugins/modules/awall.py
+++ /dev/null
@@ -1 +0,0 @@
-system/awall.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/beadm.py b/ansible_collections/community/general/plugins/modules/beadm.py
deleted file mode 120000
index 48aae8df..00000000
--- a/ansible_collections/community/general/plugins/modules/beadm.py
+++ /dev/null
@@ -1 +0,0 @@
-system/beadm.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bearychat.py b/ansible_collections/community/general/plugins/modules/bearychat.py
deleted file mode 120000
index 66a23399..00000000
--- a/ansible_collections/community/general/plugins/modules/bearychat.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/bearychat.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bigpanda.py b/ansible_collections/community/general/plugins/modules/bigpanda.py
deleted file mode 120000
index e3adb6c5..00000000
--- a/ansible_collections/community/general/plugins/modules/bigpanda.py
+++ /dev/null
@@ -1 +0,0 @@
-monitoring/bigpanda.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
deleted file mode 120000
index 6719686a..00000000
--- a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/bitbucket/bitbucket_access_key.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
deleted file mode 120000
index ab706ead..00000000
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/bitbucket/bitbucket_pipeline_key_pair.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
deleted file mode 120000
index 0e2ff5e3..00000000
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/bitbucket/bitbucket_pipeline_known_host.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
deleted file mode 120000
index ab03fed9..00000000
--- a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/bitbucket/bitbucket_pipeline_variable.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bower.py b/ansible_collections/community/general/plugins/modules/bower.py
deleted file mode 120000
index e30c1646..00000000
--- a/ansible_collections/community/general/plugins/modules/bower.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/bower.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bundler.py b/ansible_collections/community/general/plugins/modules/bundler.py
deleted file mode 120000
index 106df0c4..00000000
--- a/ansible_collections/community/general/plugins/modules/bundler.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/bundler.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/bzr.py b/ansible_collections/community/general/plugins/modules/bzr.py
deleted file mode 120000
index d04b1124..00000000
--- a/ansible_collections/community/general/plugins/modules/bzr.py
+++ /dev/null
@@ -1 +0,0 @@
-source_control/bzr.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/campfire.py b/ansible_collections/community/general/plugins/modules/campfire.py
deleted file mode 120000
index 5a29d232..00000000
--- a/ansible_collections/community/general/plugins/modules/campfire.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/campfire.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/capabilities.py b/ansible_collections/community/general/plugins/modules/capabilities.py
deleted file mode 120000
index a4fdcb9c..00000000
--- a/ansible_collections/community/general/plugins/modules/capabilities.py
+++ /dev/null
@@ -1 +0,0 @@
-system/capabilities.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cargo.py b/ansible_collections/community/general/plugins/modules/cargo.py
deleted file mode 120000
index 4cfbb506..00000000
--- a/ansible_collections/community/general/plugins/modules/cargo.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/cargo.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/catapult.py b/ansible_collections/community/general/plugins/modules/catapult.py
deleted file mode 120000
index 2ac0c142..00000000
--- a/ansible_collections/community/general/plugins/modules/catapult.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/catapult.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/circonus_annotation.py
deleted file mode 120000
index e2278645..00000000
--- a/ansible_collections/community/general/plugins/modules/circonus_annotation.py
+++ /dev/null
@@ -1 +0,0 @@
-monitoring/circonus_annotation.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cisco_spark.py b/ansible_collections/community/general/plugins/modules/cisco_spark.py
deleted file mode 120000
index af172516..00000000
--- a/ansible_collections/community/general/plugins/modules/cisco_spark.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/cisco_webex.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cisco_webex.py b/ansible_collections/community/general/plugins/modules/cisco_webex.py
deleted file mode 120000
index af172516..00000000
--- a/ansible_collections/community/general/plugins/modules/cisco_webex.py
+++ /dev/null
@@ -1 +0,0 @@
-notification/cisco_webex.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
deleted file mode 120000
index c9c633f3..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_aa_policy.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
deleted file mode 120000
index 50ef7db6..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_alert_policy.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
deleted file mode 120000
index 3982cea6..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_blueprint_package.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
deleted file mode 120000
index 0b05ba17..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_firewall_policy.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_group.py b/ansible_collections/community/general/plugins/modules/clc_group.py
deleted file mode 120000
index 5a1f6954..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_group.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_group.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
deleted file mode 120000
index e50d52f6..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_loadbalancer.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/clc_modify_server.py
deleted file mode 120000
index 306530d0..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_modify_server.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_modify_server.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_publicip.py b/ansible_collections/community/general/plugins/modules/clc_publicip.py
deleted file mode 120000
index 682925e5..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_publicip.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_publicip.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_server.py b/ansible_collections/community/general/plugins/modules/clc_server.py
deleted file mode 120000
index d59e068a..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_server.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_server.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
deleted file mode 120000
index a411552e..00000000
--- a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/centurylink/clc_server_snapshot.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py b/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
deleted file mode 100644
index 09754ccd..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance.py
+++ /dev/null
@@ -1,1013 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see http://www.gnu.org/licenses/.
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ali_instance
-short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS. Add or Remove Instance to/from a Security Group.
-description:
- - Create, start, stop, restart, modify or terminate ecs instances.
- - Add or remove ecs instances to/from security group.
-options:
- state:
- description:
- - The state of the instance after operating.
- default: 'present'
- choices: ['present', 'running', 'stopped', 'restarted', 'absent']
- type: str
- availability_zone:
- description:
- - Aliyun availability zone ID in which to launch the instance.
-              If it is not specified, it will be allocated by the system automatically.
- aliases: ['alicloud_zone', 'zone_id']
- type: str
- image_id:
- description:
- - Image ID used to launch instances. Required when C(state=present) and creating new ECS instances.
- aliases: ['image']
- type: str
- instance_type:
- description:
- - Instance type used to launch instances. Required when C(state=present) and creating new ECS instances.
- aliases: ['type']
- type: str
- security_groups:
- description:
- - A list of security group IDs.
- aliases: ['group_ids']
- type: list
- elements: str
- vswitch_id:
- description:
- - The subnet ID in which to launch the instances (VPC).
- aliases: ['subnet_id']
- type: str
- instance_name:
- description:
- - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
- uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
- It cannot begin with http:// or https://.
- aliases: ['name']
- type: str
- description:
- description:
- - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
- type: str
- internet_charge_type:
- description:
- - Internet charge type of ECS instance.
- default: 'PayByBandwidth'
- choices: ['PayByBandwidth', 'PayByTraffic']
- type: str
- max_bandwidth_in:
- description:
- - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
- default: 200
- type: int
- max_bandwidth_out:
- description:
- - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
- Required when C(allocate_public_ip=True). Ignored when C(allocate_public_ip=False).
- default: 0
- type: int
- host_name:
- description:
- - Instance host name. Ordered hostname is not supported.
- type: str
- unique_suffix:
- description:
- - Specifies whether to add sequential suffixes to the host_name.
- The sequential suffix ranges from 001 to 999.
- default: False
- type: bool
- version_added: '0.2.0'
- password:
- description:
-            - The password used to log in to the instance. After rebooting, a modified password will take effect.
- type: str
- system_disk_category:
- description:
- - Category of the system disk.
- default: 'cloud_efficiency'
- choices: ['cloud_efficiency', 'cloud_ssd']
- type: str
- system_disk_size:
- description:
- - Size of the system disk, in GB. The valid values are 40~500.
- default: 40
- type: int
- system_disk_name:
- description:
- - Name of the system disk.
- type: str
- system_disk_description:
- description:
- - Description of the system disk.
- type: str
- count:
- description:
-            - The number of new instances. An integer value which indicates how many instances that match I(count_tag)
-              should be running. Instances are either created or terminated based on this value.
- default: 1
- type: int
- count_tag:
- description:
- - I(count) determines how many instances based on a specific tag criteria should be present.
- This can be expressed in multiple ways and is shown in the EXAMPLES section.
- The specified count_tag must already exist or be passed in as the I(tags) option.
- If it is not specified, it will be replaced by I(instance_name).
- type: str
- allocate_public_ip:
- description:
-            - Whether to allocate a public IP for the new instance.
- default: False
- aliases: [ 'assign_public_ip' ]
- type: bool
- instance_charge_type:
- description:
- - The charge type of the instance.
- choices: ['PrePaid', 'PostPaid']
- default: 'PostPaid'
- type: str
- period:
- description:
- - The charge duration of the instance, in month. Required when C(instance_charge_type=PrePaid).
-            - The valid values are [1-9, 12, 24, 36].
- default: 1
- type: int
- auto_renew:
- description:
-            - Whether to automatically renew the instance charge.
- type: bool
- default: False
- auto_renew_period:
- description:
-            - The duration of the automatic renewal of the instance charge. Required when C(auto_renew=True).
- choices: [1, 2, 3, 6, 12]
- type: int
- instance_ids:
- description:
-            - A list of instance IDs. It is required when operating on existing instances.
-              If it is specified, I(count) has no effect.
- type: list
- elements: str
- force:
- description:
-            - Whether the current operation needs to be executed forcibly.
- default: False
- type: bool
- tags:
- description:
- - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"})
- aliases: ["instance_tags"]
- type: dict
- version_added: '0.2.0'
- purge_tags:
- description:
- - Delete any tags not specified in the task that are on the instance.
- If True, it means you have to specify all the desired tags on each task affecting an instance.
- default: False
- type: bool
- version_added: '0.2.0'
- key_name:
- description:
-            - The name of the key pair used to access the ECS instance via SSH.
- required: false
- type: str
- aliases: ['keypair']
- user_data:
- description:
- - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
-              It will only take effect when launching new ECS instances.
- required: false
- type: str
- ram_role_name:
- description:
- - The name of the instance RAM role.
- type: str
- version_added: '0.2.0'
- spot_price_limit:
- description:
- - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
- places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
- type: float
- version_added: '0.2.0'
- spot_strategy:
- description:
- - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
- choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
- default: 'NoSpot'
- type: str
- version_added: '0.2.0'
- period_unit:
- description:
- - The duration unit that you will buy the resource. It is valid when C(instance_charge_type=PrePaid)
- choices: ['Month', 'Week']
- default: 'Month'
- type: str
- version_added: '0.2.0'
- dry_run:
- description:
- - Specifies whether to send a dry-run request.
-            - If I(dry_run=True), only a dry-run request is sent and no instance is created. The system checks whether the
- required parameters are set, and validates the request format, service permissions, and available ECS instances.
- If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
-            - If I(dry_run=False), a request is sent. If the validation succeeds, the instance is created.
- default: False
- type: bool
- version_added: '0.2.0'
- include_data_disks:
- description:
- - Whether to change instance disks charge type when changing instance charge type.
- default: True
- type: bool
- version_added: '0.2.0'
-author:
- - "He Guimin (@xiaozhu36)"
-requirements:
- - "python >= 3.6"
- - "footmark >= 1.19.0"
-extends_documentation_fragment:
- - community.general.alicloud
-'''
-
-EXAMPLES = '''
-# basic provisioning example vpc network
-- name: Basic provisioning example
- hosts: localhost
- vars:
- alicloud_access_key:
- alicloud_secret_key:
- alicloud_region: cn-beijing
- image: ubuntu1404_64_40G_cloudinit_20160727.raw
- instance_type: ecs.n4.small
- vswitch_id: vsw-abcd1234
- assign_public_ip: True
- max_bandwidth_out: 10
- host_name: myhost
- password: mypassword
- system_disk_category: cloud_efficiency
- system_disk_size: 100
- internet_charge_type: PayByBandwidth
- security_groups: ["sg-f2rwnfh23r"]
-
- instance_ids: ["i-abcd12346", "i-abcd12345"]
- force: True
-
- tasks:
- - name: Launch ECS instance in VPC network
- community.general.ali_instance:
- alicloud_access_key: '{{ alicloud_access_key }}'
- alicloud_secret_key: '{{ alicloud_secret_key }}'
- alicloud_region: '{{ alicloud_region }}'
- image: '{{ image }}'
- system_disk_category: '{{ system_disk_category }}'
- system_disk_size: '{{ system_disk_size }}'
- instance_type: '{{ instance_type }}'
- vswitch_id: '{{ vswitch_id }}'
- assign_public_ip: '{{ assign_public_ip }}'
- internet_charge_type: '{{ internet_charge_type }}'
- max_bandwidth_out: '{{ max_bandwidth_out }}'
- tags:
- Name: created_one
- host_name: '{{ host_name }}'
- password: '{{ password }}'
-
- - name: With count and count_tag to create a number of instances
- community.general.ali_instance:
- alicloud_access_key: '{{ alicloud_access_key }}'
- alicloud_secret_key: '{{ alicloud_secret_key }}'
- alicloud_region: '{{ alicloud_region }}'
- image: '{{ image }}'
- system_disk_category: '{{ system_disk_category }}'
- system_disk_size: '{{ system_disk_size }}'
- instance_type: '{{ instance_type }}'
- assign_public_ip: '{{ assign_public_ip }}'
- security_groups: '{{ security_groups }}'
- internet_charge_type: '{{ internet_charge_type }}'
- max_bandwidth_out: '{{ max_bandwidth_out }}'
- tags:
- Name: created_one
- Version: 0.1
- count: 2
- count_tag:
- Name: created_one
- host_name: '{{ host_name }}'
- password: '{{ password }}'
-
- - name: Start instance
- community.general.ali_instance:
- alicloud_access_key: '{{ alicloud_access_key }}'
- alicloud_secret_key: '{{ alicloud_secret_key }}'
- alicloud_region: '{{ alicloud_region }}'
- instance_ids: '{{ instance_ids }}'
- state: 'running'
-
- - name: Reboot instance forcibly
-      community.general.ali_instance:
- alicloud_access_key: '{{ alicloud_access_key }}'
- alicloud_secret_key: '{{ alicloud_secret_key }}'
- alicloud_region: '{{ alicloud_region }}'
- instance_ids: '{{ instance_ids }}'
- state: 'restarted'
- force: '{{ force }}'
-
-    - name: Add instances to a security group
-      community.general.ali_instance:
- alicloud_access_key: '{{ alicloud_access_key }}'
- alicloud_secret_key: '{{ alicloud_secret_key }}'
- alicloud_region: '{{ alicloud_region }}'
- instance_ids: '{{ instance_ids }}'
- security_groups: '{{ security_groups }}'
-'''
-
-RETURN = '''
-instances:
- description: List of ECS instances
- returned: always
- type: complex
- contains:
- availability_zone:
-            description: The availability zone the instance is in.
- returned: always
- type: str
- sample: cn-beijing-a
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/xvda).
- returned: always
- type: str
- sample: /dev/xvda
- attach_time:
-                    description: The time stamp when the attachment was initiated.
- returned: always
- type: str
- sample: "2018-06-25T04:08:26Z"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: in_use
- volume_id:
- description: The ID of the cloud disk.
- returned: always
- type: str
- sample: d-2zei53pjsi117y6gf9t6
- cpu:
- description: The CPU core count of the instance.
- returned: always
- type: int
- sample: 4
- creation_time:
- description: The time the instance was created.
- returned: always
- type: str
- sample: "2018-06-25T04:08Z"
- description:
- description: The instance description.
- returned: always
- type: str
- sample: "my ansible instance"
- eip:
-            description: The attributes of the EIP associated with the instance.
- returned: always
- type: complex
- contains:
- allocation_id:
- description: The ID of the EIP.
- returned: always
- type: str
- sample: eip-12345
- internet_charge_type:
- description: The internet charge type of the EIP.
- returned: always
- type: str
- sample: "paybybandwidth"
- ip_address:
- description: EIP address.
- returned: always
- type: str
- sample: 42.10.2.2
- expired_time:
- description: The time the instance will expire.
- returned: always
- type: str
- sample: "2099-12-31T15:59Z"
- gpu:
-            description: The attributes of the instance GPU.
- returned: always
- type: complex
- contains:
- amount:
- description: The count of the GPU.
- returned: always
- type: int
- sample: 0
- spec:
- description: The specification of the GPU.
- returned: always
- type: str
- sample: ""
- host_name:
- description: The host name of the instance.
- returned: always
- type: str
- sample: iZ2zewaoZ
- id:
- description: Alias of instance_id.
- returned: always
- type: str
- sample: i-abc12345
- instance_id:
- description: ECS instance resource ID.
- returned: always
- type: str
- sample: i-abc12345
- image_id:
- description: The ID of the image used to launch the instance.
- returned: always
- type: str
- sample: m-0011223344
- inner_ip_address:
- description: The inner IPv4 address of the classic instance.
- returned: always
- type: str
- sample: 10.0.0.2
- instance_charge_type:
- description: The instance charge type.
- returned: always
- type: str
- sample: PostPaid
- instance_name:
- description: The name of the instance.
- returned: always
- type: str
- sample: my-ecs
- instance_type:
- description: The instance type of the running instance.
- returned: always
- type: str
- sample: ecs.sn1ne.xlarge
- instance_type_family:
- description: The instance type family the instance belongs to.
- returned: always
- type: str
- sample: ecs.sn1ne
- internet_charge_type:
- description: The billing method of the network bandwidth.
- returned: always
- type: str
- sample: PayByBandwidth
- internet_max_bandwidth_in:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 200
- internet_max_bandwidth_out:
- description: Maximum outgoing bandwidth to the internet network.
- returned: always
- type: int
- sample: 20
- io_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- memory:
- description: Memory size of the instance.
- returned: always
- type: int
- sample: 8192
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- primary_ip_address:
- description: The primary IPv4 address of the network interface within the vswitch.
- returned: always
- type: str
- sample: 10.0.0.1
- osname:
- description: The operating system name of the instance.
- returned: always
- type: str
- sample: CentOS
- ostype:
- description: The operating system type of the instance.
- returned: always
- type: str
- sample: linux
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- public_ip_address:
- description: The public IPv4 address assigned to the instance, or the EIP address.
- returned: always
- type: str
- sample: 43.0.0.1
- resource_group_id:
- description: The ID of the resource group to which the instance belongs.
- returned: always
- type: str
- sample: my-ecs-group
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
- status:
- description: The current status of the instance.
- returned: always
- type: str
- sample: running
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- user_data:
- description: User-defined data.
- returned: always
- type: dict
- sample:
- vswitch_id:
- description: The ID of the vswitch in which the instance is running.
- returned: always
- type: str
- sample: vsw-dew00abcdef
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
- type: str
- sample: vpc-0011223344
- spot_price_limit:
- description:
- - The maximum hourly price for the preemptible instance.
- returned: always
- type: float
- sample: 0.97
- spot_strategy:
- description:
- - The bidding mode of the pay-as-you-go instance.
- returned: always
- type: str
- sample: NoSpot
-ids:
- description: List of ECS instance IDs
- returned: always
- type: list
- sample: [i-12345er, i-3245fs]
-'''
-
-import ast
-import re
-import time
-import traceback
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
-
-HAS_FOOTMARK = False
-FOOTMARK_IMP_ERR = None
-try:
- from footmark.exception import ECSResponseError
- HAS_FOOTMARK = True
-except ImportError:
- FOOTMARK_IMP_ERR = traceback.format_exc()
- HAS_FOOTMARK = False
-
-
-def get_instances_info(connection, ids):
- result = []
- instances = connection.describe_instances(instance_ids=ids)
- if len(instances) > 0:
- for inst in instances:
- volumes = connection.describe_disks(instance_id=inst.id)
- setattr(inst, 'block_device_mappings', volumes)
- setattr(inst, 'user_data', inst.describe_user_data())
- result.append(inst.read())
- return result
-
-
-def run_instance(module, ecs, exact_count):
- if exact_count <= 0:
- return None
- zone_id = module.params['availability_zone']
- image_id = module.params['image_id']
- instance_type = module.params['instance_type']
- security_groups = module.params['security_groups']
- vswitch_id = module.params['vswitch_id']
- instance_name = module.params['instance_name']
- description = module.params['description']
- internet_charge_type = module.params['internet_charge_type']
- max_bandwidth_out = module.params['max_bandwidth_out']
- max_bandwidth_in = module.params['max_bandwidth_in']
- host_name = module.params['host_name']
- password = module.params['password']
- system_disk_category = module.params['system_disk_category']
- system_disk_size = module.params['system_disk_size']
- system_disk_name = module.params['system_disk_name']
- system_disk_description = module.params['system_disk_description']
- allocate_public_ip = module.params['allocate_public_ip']
- period = module.params['period']
- auto_renew = module.params['auto_renew']
- instance_charge_type = module.params['instance_charge_type']
- auto_renew_period = module.params['auto_renew_period']
- user_data = module.params['user_data']
- key_name = module.params['key_name']
- ram_role_name = module.params['ram_role_name']
- spot_price_limit = module.params['spot_price_limit']
- spot_strategy = module.params['spot_strategy']
- unique_suffix = module.params['unique_suffix']
- # check whether the required parameter passed or not
- if not image_id:
- module.fail_json(msg='image_id is required for new instance')
- if not instance_type:
- module.fail_json(msg='instance_type is required for new instance')
- if not isinstance(security_groups, list):
- module.fail_json(msg='The parameter security_groups should be a list, aborting')
- if len(security_groups) <= 0:
- module.fail_json(msg='Expected the parameter security_groups is non-empty when create new ECS instances, aborting')
-
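- # The client token below acts as a best-effort idempotency key: retried
- # run_instances calls carrying the same token should not create duplicate instances.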
- client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
-
- try:
- # call to create_instance method from footmark
- instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
- zone_id=zone_id, instance_name=instance_name, description=description,
- internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
- internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
- io_optimized='optimized', system_disk_category=system_disk_category,
- system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
- system_disk_description=system_disk_description, vswitch_id=vswitch_id,
- amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
- auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
- user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
- spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
-
- except Exception as e:
- module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
-
- return instances
-
-
-def modify_instance(module, instance):
- # According to state to modify instance's some special attribute
- state = module.params["state"]
- name = module.params['instance_name']
- unique_suffix = module.params['unique_suffix']
- if not name:
- name = instance.name
-
- description = module.params['description']
- if not description:
- description = instance.description
-
- host_name = module.params['host_name']
- if unique_suffix and host_name:
- suffix = instance.host_name[-3:]
- host_name = host_name + suffix
-
- if not host_name:
- host_name = instance.host_name
-
- # password can be modified only when restart instance
- password = ""
- if state == "restarted":
- password = module.params['password']
-
- # userdata can be modified only when instance is stopped
- setattr(instance, "user_data", instance.describe_user_data())
- user_data = instance.user_data
- if state == "stopped":
- user_data = module.params['user_data'].encode()
-
- try:
- return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
- except Exception as e:
- module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
-
-
-def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
- """
- Verify that the instance charge type has changed to the expected value after modification.
- """
- try:
- while True:
- instances = ecs.describe_instances(instance_ids=instance_ids)
- flag = True
- for inst in instances:
- if inst and inst.instance_charge_type != charge_type:
- flag = False
- if flag:
- return
- timeout -= delay
- time.sleep(delay)
- if timeout <= 0:
- raise Exception("Timeout Error: Waiting for instance to {0}. ".format(charge_type))
- except Exception as e:
- raise e
-
-
-def main():
- argument_spec = ecs_argument_spec()
- argument_spec.update(dict(
- security_groups=dict(type='list', elements='str', aliases=['group_ids']),
- availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
- instance_type=dict(type='str', aliases=['type']),
- image_id=dict(type='str', aliases=['image']),
- count=dict(type='int', default=1),
- count_tag=dict(type='str'),
- vswitch_id=dict(type='str', aliases=['subnet_id']),
- instance_name=dict(type='str', aliases=['name']),
- host_name=dict(type='str'),
- password=dict(type='str', no_log=True),
- internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
- max_bandwidth_in=dict(type='int', default=200),
- max_bandwidth_out=dict(type='int', default=0),
- system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
- system_disk_size=dict(type='int', default=40),
- system_disk_name=dict(type='str'),
- system_disk_description=dict(type='str'),
- force=dict(type='bool', default=False),
- tags=dict(type='dict', aliases=['instance_tags']),
- purge_tags=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
- description=dict(type='str'),
- allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
- instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
- period=dict(type='int', default=1),
- auto_renew=dict(type='bool', default=False),
- instance_ids=dict(type='list', elements='str'),
- auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
- key_name=dict(type='str', aliases=['keypair']),
- user_data=dict(type='str'),
- ram_role_name=dict(type='str'),
- spot_price_limit=dict(type='float'),
- spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
- unique_suffix=dict(type='bool', default=False),
- period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
- dry_run=dict(type='bool', default=False),
- include_data_disks=dict(type='bool', default=True)
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- if HAS_FOOTMARK is False:
- module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
-
- ecs = ecs_connect(module)
- host_name = module.params['host_name']
- state = module.params['state']
- instance_ids = module.params['instance_ids']
- count_tag = module.params['count_tag']
- count = module.params['count']
- instance_name = module.params['instance_name']
- force = module.params['force']
- zone_id = module.params['availability_zone']
- key_name = module.params['key_name']
- tags = module.params['tags']
- max_bandwidth_out = module.params['max_bandwidth_out']
- instance_charge_type = module.params['instance_charge_type']
- if instance_charge_type == "PrePaid":
- module.params['spot_strategy'] = ''
- changed = False
-
- instances = []
- if instance_ids:
- if not isinstance(instance_ids, list):
- module.fail_json(msg='The parameter instance_ids should be a list, aborting')
- instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
- if not instances:
- module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
- "Please check it and try again.".format(instance_ids))
- elif count_tag:
- # literal_eval safely parses the user-supplied dict string; eval would execute arbitrary code
- instances = ecs.describe_instances(zone_id=zone_id, tags=ast.literal_eval(count_tag))
- elif instance_name:
- instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
-
- ids = []
- if state == 'absent':
- if len(instances) < 1:
- module.fail_json(msg='Please specify the ECS instances that you want to operate on by using '
- 'the parameters instance_ids, tags or instance_name, aborting')
- try:
- targets = []
- for inst in instances:
- if inst.status != 'stopped' and not force:
- module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
- targets.append(inst.id)
- if ecs.delete_instances(instance_ids=targets, force=force):
- changed = True
- ids.extend(targets)
-
- module.exit_json(changed=changed, ids=ids, instances=[])
- except Exception as e:
- module.fail_json(msg='Delete instance got an error: {0}'.format(e))
-
- if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
- module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
- if not module.params['allocate_public_ip']:
- module.params['max_bandwidth_out'] = 0
-
- if state == 'present':
- if not instance_ids:
- if len(instances) > count:
- for i in range(0, len(instances) - count):
- inst = instances[len(instances) - 1]
- if inst.status != 'stopped' and not force:
- module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
- "and please stop it or set 'force' as True.".format(inst.id))
- try:
- if inst.terminate(force=force):
- changed = True
- except Exception as e:
- module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
- instances.pop(len(instances) - 1)
- else:
- try:
- if re.search(r"-\[\d+,\d+\]-", host_name):
- module.fail_json(msg='Ordered hostnames are not supported. If you want to add an ordered '
- 'suffix to the hostname, set unique_suffix to True')
- new_instances = run_instance(module, ecs, count - len(instances))
- if new_instances:
- changed = True
- instances.extend(new_instances)
- except Exception as e:
- module.fail_json(msg="Create new instances got an error: {0}".format(e))
-
- # Security Group join/leave begin
- security_groups = module.params['security_groups']
- if security_groups:
- if not isinstance(security_groups, list):
- module.fail_json(msg='The parameter security_groups should be a list, aborting')
- for inst in instances:
- existing = inst.security_group_ids['security_group_id']
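- # Set differences: leave groups that are attached but no longer requested,
- # and join groups that are requested but not yet attached.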
- remove = list(set(existing).difference(set(security_groups)))
- add = list(set(security_groups).difference(set(existing)))
- for sg in remove:
- if inst.leave_security_group(sg):
- changed = True
- for sg in add:
- if inst.join_security_group(sg):
- changed = True
- # Security Group join/leave ends here
-
- # Attach/Detach key pair
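- # An empty string for key_name requests detaching the current key pair;
- # any other differing name is queued for a single bulk attach call below.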
- inst_ids = []
- for inst in instances:
- if key_name is not None and key_name != inst.key_name:
- if key_name == "":
- if inst.detach_key_pair():
- changed = True
- else:
- inst_ids.append(inst.id)
- if inst_ids:
- changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name)
-
- # Modify instance attribute
- for inst in instances:
- if modify_instance(module, inst):
- changed = True
- if inst.id not in ids:
- ids.append(inst.id)
-
- # Modify instance charge type
- ids = []
- for inst in instances:
- if inst.instance_charge_type != instance_charge_type:
- ids.append(inst.id)
- if ids:
- params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
- "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
- "auto_pay": True}
- if instance_charge_type == 'PrePaid':
- params['period'] = module.params['period']
- params['period_unit'] = module.params['period_unit']
-
- if ecs.modify_instance_charge_type(**params):
- changed = True
- wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
-
- else:
- if len(instances) < 1:
- module.fail_json(msg='Please specify the ECS instances that you want to operate on by using '
- 'the parameters instance_ids, tags or instance_name, aborting')
- if state == 'running':
- try:
- targets = []
- for inst in instances:
- if modify_instance(module, inst):
- changed = True
- if inst.status != "running":
- targets.append(inst.id)
- ids.append(inst.id)
- if targets and ecs.start_instances(instance_ids=targets):
- changed = True
- ids.extend(targets)
- except Exception as e:
- module.fail_json(msg='Start instances got an error: {0}'.format(e))
- elif state == 'stopped':
- try:
- targets = []
- for inst in instances:
- if inst.status != "stopped":
- targets.append(inst.id)
- if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
- changed = True
- ids.extend(targets)
- for inst in instances:
- if modify_instance(module, inst):
- changed = True
- except Exception as e:
- module.fail_json(msg='Stop instances got an error: {0}'.format(e))
- elif state == 'restarted':
- try:
- targets = []
- for inst in instances:
- if modify_instance(module, inst):
- changed = True
- targets.append(inst.id)
- if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
- changed = True
- ids.extend(targets)
- except Exception as e:
- module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
-
- tags = module.params['tags']
- if module.params['purge_tags']:
- for inst in instances:
- if not tags:
- tags = inst.tags
- try:
- if inst.remove_tags(tags):
- changed = True
- except Exception as e:
- module.fail_json(msg="{0}".format(e))
- module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
-
- if tags:
- for inst in instances:
- try:
- if inst.add_tags(tags):
- changed = True
- except Exception as e:
- module.fail_json(msg="{0}".format(e))
- module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
deleted file mode 100644
index 06df6cb4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/alicloud/ali_instance_info.py
+++ /dev/null
@@ -1,444 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see http://www.gnu.org/licenses/.
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ali_instance_info
-short_description: Gather information on instances of Alibaba Cloud ECS.
-description:
- - This module fetches data from the Open API in Alicloud.
- The module must be called from within the ECS instance itself.
- - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
-
-options:
- availability_zone:
- description:
- - Aliyun availability zone ID in which to launch the instance.
- - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(zone_id) instead.
- aliases: ['alicloud_zone']
- type: str
- instance_names:
- description:
- - A list of ECS instance names.
- - Deprecated parameter, it will be removed in community.general 5.0.0. Please use filter item I(instance_name) instead.
- aliases: ["names"]
- type: list
- elements: str
- instance_ids:
- description:
- - A list of ECS instance ids.
- aliases: ["ids"]
- type: list
- elements: str
- name_prefix:
- description:
- - Use an instance name prefix to filter ECS instances.
- type: str
- version_added: '0.2.0'
- tags:
- description:
- - A dictionary of instance tags, for example C({"key":"value"}).
- aliases: ["instance_tags"]
- type: dict
- filters:
- description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. Any request parameter
- can be used as a filter key. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
- A filter key can be the same as the request parameter name, or lower case with underscores ("_") or dashes ("-")
- connecting the words of the parameter name. 'InstanceIds' should be a list and is appended to
- I(instance_ids) automatically. For 'Tag.n.Key' and 'Tag.n.Value', use the I(tags) dict instead.
- type: dict
- version_added: '0.2.0'
-author:
- - "He Guimin (@xiaozhu36)"
-requirements:
- - "python >= 3.6"
- - "footmark >= 1.13.0"
-extends_documentation_fragment:
- - community.general.alicloud
-'''
-
-EXAMPLES = '''
-# Fetch instance details by applying different filters
-
-- name: Find all instances in the specified region
- community.general.ali_instance_info:
- register: all_instances
-
-- name: Find all instances based on the specified ids
- community.general.ali_instance_info:
- instance_ids:
- - "i-35b333d9"
- - "i-ddav43kd"
- register: instances_by_ids
-
-- name: Find all instances based on the specified name_prefix
- community.general.ali_instance_info:
- name_prefix: "ecs_instance_"
- register: instances_by_name_prefix
-
-- name: Find instances based on tags
- community.general.ali_instance_info:
- tags:
- Test: "add"
-'''
-
-RETURN = '''
-instances:
- description: List of ECS instances
- returned: always
- type: complex
- contains:
- availability_zone:
- description: The availability zone the instance is in.
- returned: always
- type: str
- sample: cn-beijing-a
- block_device_mappings:
- description: Any block device mapping entries for the instance.
- returned: always
- type: complex
- contains:
- device_name:
- description: The device name exposed to the instance (for example, /dev/xvda).
- returned: always
- type: str
- sample: /dev/xvda
- attach_time:
- description: The time stamp when the attachment initiated.
- returned: always
- type: str
- sample: "2018-06-25T04:08:26Z"
- delete_on_termination:
- description: Indicates whether the volume is deleted on instance termination.
- returned: always
- type: bool
- sample: true
- status:
- description: The attachment state.
- returned: always
- type: str
- sample: in_use
- volume_id:
- description: The ID of the cloud disk.
- returned: always
- type: str
- sample: d-2zei53pjsi117y6gf9t6
- cpu:
- description: The CPU core count of the instance.
- returned: always
- type: int
- sample: 4
- creation_time:
- description: The time the instance was created.
- returned: always
- type: str
- sample: "2018-06-25T04:08Z"
- description:
- description: The instance description.
- returned: always
- type: str
- sample: "my ansible instance"
- eip:
- description: The attribution of EIP associated with the instance.
- returned: always
- type: complex
- contains:
- allocation_id:
- description: The ID of the EIP.
- returned: always
- type: str
- sample: eip-12345
- internet_charge_type:
- description: The internet charge type of the EIP.
- returned: always
- type: str
- sample: "paybybandwidth"
- ip_address:
- description: EIP address.
- returned: always
- type: str
- sample: 42.10.2.2
- expired_time:
- description: The time the instance will expire.
- returned: always
- type: str
- sample: "2099-12-31T15:59Z"
- gpu:
- description: The attribution of instance GPU.
- returned: always
- type: complex
- contains:
- amount:
- description: The count of the GPU.
- returned: always
- type: int
- sample: 0
- spec:
- description: The specification of the GPU.
- returned: always
- type: str
- sample: ""
- host_name:
- description: The host name of the instance.
- returned: always
- type: str
- sample: iZ2zewaoZ
- id:
- description: Alias of instance_id.
- returned: always
- type: str
- sample: i-abc12345
- instance_id:
- description: ECS instance resource ID.
- returned: always
- type: str
- sample: i-abc12345
- image_id:
- description: The ID of the image used to launch the instance.
- returned: always
- type: str
- sample: m-0011223344
- inner_ip_address:
- description: The inner IPv4 address of the classic instance.
- returned: always
- type: str
- sample: 10.0.0.2
- instance_charge_type:
- description: The instance charge type.
- returned: always
- type: str
- sample: PostPaid
- instance_name:
- description: The name of the instance.
- returned: always
- type: str
- sample: my-ecs
- instance_type_family:
- description: The instance type family the instance belongs to.
- returned: always
- type: str
- sample: ecs.sn1ne
- instance_type:
- description: The instance type of the running instance.
- returned: always
- type: str
- sample: ecs.sn1ne.xlarge
- internet_charge_type:
- description: The billing method of the network bandwidth.
- returned: always
- type: str
- sample: PayByBandwidth
- internet_max_bandwidth_in:
- description: Maximum incoming bandwidth from the internet network.
- returned: always
- type: int
- sample: 200
- internet_max_bandwidth_out:
- description: Maximum outgoing bandwidth to the internet network.
- returned: always
- type: int
- sample: 20
- io_optimized:
- description: Indicates whether the instance is optimized for EBS I/O.
- returned: always
- type: bool
- sample: false
- memory:
- description: Memory size of the instance.
- returned: always
- type: int
- sample: 8192
- network_interfaces:
- description: One or more network interfaces for the instance.
- returned: always
- type: complex
- contains:
- mac_address:
- description: The MAC address.
- returned: always
- type: str
- sample: "00:11:22:33:44:55"
- network_interface_id:
- description: The ID of the network interface.
- returned: always
- type: str
- sample: eni-01234567
- primary_ip_address:
- description: The primary IPv4 address of the network interface within the vswitch.
- returned: always
- type: str
- sample: 10.0.0.1
- osname:
- description: The operating system name of the instance.
- returned: always
- type: str
- sample: CentOS
- ostype:
- description: The operating system type of the instance.
- returned: always
- type: str
- sample: linux
- private_ip_address:
- description: The IPv4 address of the network interface within the subnet.
- returned: always
- type: str
- sample: 10.0.0.1
- public_ip_address:
- description: The public IPv4 address assigned to the instance, or the EIP address.
- returned: always
- type: str
- sample: 43.0.0.1
- resource_group_id:
- description: The ID of the resource group to which the instance belongs.
- returned: always
- type: str
- sample: my-ecs-group
- security_groups:
- description: One or more security groups for the instance.
- returned: always
- type: list
- elements: dict
- contains:
- group_id:
- description: The ID of the security group.
- returned: always
- type: str
- sample: sg-0123456
- group_name:
- description: The name of the security group.
- returned: always
- type: str
- sample: my-security-group
- status:
- description: The current status of the instance.
- returned: always
- type: str
- sample: running
- tags:
- description: Any tags assigned to the instance.
- returned: always
- type: dict
- sample:
- vswitch_id:
- description: The ID of the vswitch in which the instance is running.
- returned: always
- type: str
- sample: vsw-dew00abcdef
- vpc_id:
- description: The ID of the VPC the instance is in.
- returned: always
- type: str
- sample: vpc-0011223344
-ids:
- description: List of ECS instance IDs
- returned: always
- type: list
- sample: [i-12345er, i-3245fs]
-'''
-
-import traceback
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ecs_argument_spec, ecs_connect
-
-HAS_FOOTMARK = False
-FOOTMARK_IMP_ERR = None
-try:
- from footmark.exception import ECSResponseError
- HAS_FOOTMARK = True
-except ImportError:
- FOOTMARK_IMP_ERR = traceback.format_exc()
- HAS_FOOTMARK = False
-
-
-def main():
- argument_spec = ecs_argument_spec()
- argument_spec.update(dict(
- availability_zone=dict(aliases=['alicloud_zone'],
- removed_in_version="5.0.0", removed_from_collection="community.general"),
- instance_ids=dict(type='list', elements='str', aliases=['ids'],
- removed_in_version="5.0.0", removed_from_collection="community.general"),
- instance_names=dict(type='list', elements='str', aliases=['names']),
- name_prefix=dict(type='str'),
- tags=dict(type='dict', aliases=['instance_tags']),
- filters=dict(type='dict')
- )
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- if HAS_FOOTMARK is False:
- module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
-
- ecs = ecs_connect(module)
-
- instances = []
- instance_ids = []
- ids = module.params['instance_ids']
- name_prefix = module.params['name_prefix']
- names = module.params['instance_names']
- zone_id = module.params['availability_zone']
- if ids and (not isinstance(ids, list) or len(ids) < 1):
- module.fail_json(msg='instance_ids should be a list of instances, aborting')
-
- if names and (not isinstance(names, list) or len(names) < 1):
- module.fail_json(msg='instance_names should be a list of instances, aborting')
-
- filters = module.params['filters']
- if not filters:
- filters = {}
- if not ids:
- ids = []
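- # Merge instance ids supplied via 'filters' (under any accepted key spelling)
- # into the deduplicated ids list.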
- for key, value in list(filters.items()):
- if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(ids, list):
- for id in value:
- if id not in ids:
- ids.append(id)
- if ids:
- filters['instance_ids'] = ids
- if module.params['tags']:
- filters['tags'] = module.params['tags']
- if zone_id:
- filters['zone_id'] = zone_id
- if names:
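- # Only the first supplied name is applied as the instance_name filter.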
- filters['instance_name'] = names[0]
-
- for inst in ecs.describe_instances(**filters):
- if name_prefix:
- if not str(inst.instance_name).startswith(name_prefix):
- continue
- volumes = ecs.describe_disks(instance_id=inst.id)
- setattr(inst, 'block_device_mappings', volumes)
- setattr(inst, 'user_data', inst.describe_user_data())
- instances.append(inst.read())
- instance_ids.append(inst.id)
-
- module.exit_json(changed=False, ids=instance_ids, instances=instances)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
deleted file mode 100644
index ca631256..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_container.py
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: atomic_container
-short_description: Manage the containers on the atomic host platform
-description:
- - Manage the containers on the atomic host platform.
- - Allows managing the lifecycle of a container on the atomic host platform.
-author: "Giuseppe Scrivano (@giuseppe)"
-notes:
- - Host should support the C(atomic) command.
-requirements:
- - atomic
- - "python >= 2.6"
-options:
- backend:
- description:
- - Define the backend to use for the container.
- required: True
- choices: ["docker", "ostree"]
- type: str
- name:
- description:
- - Name of the container.
- required: True
- type: str
- image:
- description:
- - The image to use to install the container.
- required: True
- type: str
- rootfs:
- description:
- - Define the rootfs of the image.
- type: str
- state:
- description:
- - State of the container.
- choices: ["absent", "latest", "present", "rollback"]
- default: "latest"
- type: str
- mode:
- description:
- - Define whether it is a user or a system container.
- choices: ["user", "system"]
- type: str
- values:
- description:
- - Values for the installation of the container.
- - This option is permitted only with mode 'user' or 'system'.
- - The values specified here will be used at installation time as --set arguments for atomic install.
- type: list
- elements: str
-'''
-
-EXAMPLES = r'''
-
-- name: Install the etcd system container
- community.general.atomic_container:
- name: etcd
- image: rhel/etcd
- backend: ostree
- state: latest
- mode: system
- values:
- - ETCD_NAME=etcd.server
-
-- name: Uninstall the etcd system container
- community.general.atomic_container:
- name: etcd
- image: rhel/etcd
- backend: ostree
- state: absent
- mode: system
-'''
-
-RETURN = r'''
-msg:
- description: The command standard output
- returned: always
- type: str
- sample: [u'Using default tag: latest ...']
-'''
-
-# import module snippets
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-def do_install(module, mode, rootfs, container, image, values_list, backend):
- system_list = ["--system"] if mode == 'system' else []
- user_list = ["--user"] if mode == 'user' else []
- rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
- atomic_bin = module.get_bin_path('atomic')
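- # Illustrative resulting command, using values from the EXAMPLES above:
- #   atomic install --storage=ostree --name=etcd --system --set=ETCD_NAME=etcd.server rhel/etcd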
- args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0:
- module.fail_json(rc=rc, msg=err)
- else:
- changed = "Extracting" in out or "Copying blob" in out
- module.exit_json(msg=out, changed=changed)
-
-
-def do_update(module, container, image, values_list):
- atomic_bin = module.get_bin_path('atomic')
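- # Illustrative resulting command: atomic containers update --rebase=rhel/etcd etcd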
- args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0:
- module.fail_json(rc=rc, msg=err)
- else:
- changed = "Extracting" in out or "Copying blob" in out
- module.exit_json(msg=out, changed=changed)
-
-
-def do_uninstall(module, name, backend):
- atomic_bin = module.get_bin_path('atomic')
- args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0:
- module.fail_json(rc=rc, msg=err)
- module.exit_json(msg=out, changed=True)
-
-
-def do_rollback(module, name):
- atomic_bin = module.get_bin_path('atomic')
- args = [atomic_bin, 'containers', 'rollback', name]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0:
- module.fail_json(rc=rc, msg=err)
- else:
- changed = "Rolling back" in out
- module.exit_json(msg=out, changed=changed)
-
-
-def core(module):
- mode = module.params['mode']
- name = module.params['name']
- image = module.params['image']
- rootfs = module.params['rootfs']
- values = module.params['values']
- backend = module.params['backend']
- state = module.params['state']
-
- atomic_bin = module.get_bin_path('atomic')
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
-
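- # e.g. values ['ETCD_NAME=etcd.server'] becomes ['--set=ETCD_NAME=etcd.server']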
- values_list = ["--set=%s" % x for x in values] if values else []
-
- args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0:
- module.fail_json(rc=rc, msg=err)
- return
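- # Substring check: the name appearing anywhere in the listing output is treated as present.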
- present = name in out
-
- if state == 'present' and present:
- module.exit_json(msg=out, changed=False)
- elif (state in ['latest', 'present']) and not present:
- do_install(module, mode, rootfs, name, image, values_list, backend)
- elif state == 'latest':
- do_update(module, name, image, values_list)
- elif state == 'absent':
- if not present:
- module.exit_json(msg="The container is not present", changed=False)
- else:
- do_uninstall(module, name, backend)
- elif state == 'rollback':
- do_rollback(module, name)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- mode=dict(default=None, choices=['user', 'system']),
- name=dict(required=True),
- image=dict(required=True),
- rootfs=dict(default=None),
- state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
- backend=dict(required=True, choices=['docker', 'ostree']),
- values=dict(type='list', default=[], elements='str'),
- ),
- )
-
- # 'mode' defaults to None (there is no 'default' choice), so reject values when no mode is set
- if module.params['values'] and module.params['mode'] is None:
- module.fail_json(msg="values is supported only with user or system mode")
-
- # Verify that the platform supports atomic command
- dummy = module.get_bin_path('atomic', required=True)
-
- try:
- core(module)
- except Exception as e:
- module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
deleted file mode 100644
index 85b00f91..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_host.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: atomic_host
-short_description: Manage the atomic host platform
-description:
- - Manage the atomic host platform.
- - Rebooting the Atomic host platform should be done outside this module.
-author:
-- Saravanan KR (@krsacme)
-notes:
- - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file).
-requirements:
- - atomic
- - python >= 2.6
-options:
- revision:
- description:
- - The version number of the atomic host to be deployed.
- - Providing C(latest) will upgrade to the latest available version.
- default: 'latest'
- aliases: [ version ]
- type: str
-'''
-
-EXAMPLES = r'''
-- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
- community.general.atomic_host:
- revision: latest
-
-- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
- community.general.atomic_host:
- revision: 23.130
-'''
-
-RETURN = r'''
-msg:
- description: The command standard output
- returned: always
- type: str
- sample: 'Already on latest'
-'''
-import os
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-def core(module):
- revision = module.params['revision']
- atomic_bin = module.get_bin_path('atomic', required=True)
-
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
-
- if revision == 'latest':
- args = [atomic_bin, 'host', 'upgrade']
- else:
- args = [atomic_bin, 'host', 'deploy', revision]
-
- rc, out, err = module.run_command(args, check_rc=False)
-
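- # An exit status of 77 from 'atomic host upgrade' is treated as
- # "already on the latest version" rather than as a failure.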
- if rc == 77 and revision == 'latest':
- module.exit_json(msg="Already on latest", changed=False)
- elif rc != 0:
- module.fail_json(rc=rc, msg=err)
- else:
- module.exit_json(msg=out, changed=True)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- revision=dict(type='str', default='latest', aliases=["version"]),
- ),
- )
-
- # Verify that the platform is atomic host
- if not os.path.exists("/run/ostree-booted"):
- module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
-
- try:
- core(module)
- except Exception as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py b/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
deleted file mode 100644
index 350ad4c2..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/atomic/atomic_image.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: atomic_image
-short_description: Manage the container images on the atomic host platform
-description:
- - Manage the container images on the atomic host platform.
- - Allows executing the commands specified by the RUN label in the container image, when present.
-author:
-- Saravanan KR (@krsacme)
-notes:
- - Host should support the C(atomic) command.
-requirements:
- - atomic
- - python >= 2.6
-options:
- backend:
- description:
- - Define the backend where the image is pulled.
- choices: [ 'docker', 'ostree' ]
- type: str
- name:
- description:
- - Name of the container image.
- required: True
- type: str
- state:
- description:
- - The state of the container image.
- - The state C(latest) ensures the container image is upgraded to the latest version and forcefully restarts the container, if running.
- choices: [ 'absent', 'latest', 'present' ]
- default: 'latest'
- type: str
- started:
- description:
- - Start or stop the container.
- type: bool
- default: 'yes'
-'''
-
-EXAMPLES = r'''
-- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
- community.general.atomic_image:
- name: rhel7/rsyslog
- state: latest
-
-- name: Pull busybox to the OSTree backend
- community.general.atomic_image:
- name: busybox
- state: latest
- backend: ostree
-'''
-
-RETURN = r'''
-msg:
- description: The command standard output
- returned: always
- type: str
- sample: [u'Using default tag: latest ...']
-'''
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-
-def do_upgrade(module, image):
- atomic_bin = module.get_bin_path('atomic')
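- # Illustrative resulting command, per the EXAMPLES above: atomic update --force rhel7/rsyslog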
- args = [atomic_bin, 'update', '--force', image]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc != 0: # something went wrong emit the msg
- module.fail_json(rc=rc, msg=err)
- elif 'Image is up to date' in out:
- return False
-
- return True
-
-
-def core(module):
- image = module.params['name']
- state = module.params['state']
- started = module.params['started']
- backend = module.params['backend']
- is_upgraded = False
-
- module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
- atomic_bin = module.get_bin_path('atomic')
- out = {}
- err = {}
- rc = 0
-
- if backend:
- if state == 'present' or state == 'latest':
- args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
- rc, out, err = module.run_command(args, check_rc=False)
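- # Note: module.run_command returns a negative rc only when the process is
- # killed by a signal; ordinary non-zero exit codes fall through to the
- # output checks below.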
- if rc < 0:
- module.fail_json(rc=rc, msg=err)
- else:
- out_run = ""
- if started:
- args = [atomic_bin, 'run', "--storage=%s" % backend, image]
- rc, out_run, err = module.run_command(args, check_rc=False)
- if rc < 0:
- module.fail_json(rc=rc, msg=err)
-
- changed = "Extracting" in out or "Copying blob" in out
- module.exit_json(msg=(out + out_run), changed=changed)
- elif state == 'absent':
- args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
- rc, out, err = module.run_command(args, check_rc=False)
- if rc < 0:
- module.fail_json(rc=rc, msg=err)
- else:
- changed = "Unable to find" not in out
- module.exit_json(msg=out, changed=changed)
- return
-
- if state == 'present' or state == 'latest':
- if state == 'latest':
- is_upgraded = do_upgrade(module, image)
-
- if started:
- args = [atomic_bin, 'run', image]
- else:
- args = [atomic_bin, 'install', image]
- elif state == 'absent':
- args = [atomic_bin, 'uninstall', image]
-
- rc, out, err = module.run_command(args, check_rc=False)
-
- if rc < 0:
- module.fail_json(rc=rc, msg=err)
- elif rc == 1 and 'already present' in err:
- module.exit_json(result=err, changed=is_upgraded)
- elif started and 'Container is running' in out:
- module.exit_json(result=out, changed=is_upgraded)
- else:
- module.exit_json(msg=out, changed=True)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- backend=dict(type='str', choices=['docker', 'ostree']),
- name=dict(type='str', required=True),
- state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
- started=dict(type='bool', default=True),
- ),
- )
-
- # Verify that the platform supports atomic command
- dummy = module.get_bin_path('atomic', required=True)
-
- try:
- core(module)
- except Exception as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
deleted file mode 100644
index 416a4a6c..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_aa_policy.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_aa_policy
-short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
-description:
- - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
-options:
- name:
- description:
- - The name of the Anti Affinity Policy.
- type: str
- required: True
- location:
- description:
- - Datacenter in which the policy lives/should live.
- type: str
- required: True
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- required: False
- default: present
- choices: ['present','absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enable access to the
- CenturyLink Cloud
- - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
----
-- name: Create AA Policy
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Create an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-
-- name: Delete AA Policy
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Delete an Anti Affinity Policy
- community.general.clc_aa_policy:
- name: Hammer Time
- location: UK3
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug:
- var: policy
-'''
-
-RETURN = '''
-policy:
- description: The anti affinity policy information
- returned: success
- type: dict
- sample:
- {
- "id":"1a28dd0988984d87b9cd61fa8da15424",
- "name":"test_aa_policy",
- "location":"UC1",
- "links":[
- {
- "rel":"self",
- "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
- "verbs":[
- "GET",
- "DELETE",
- "PUT"
- ]
- },
- {
- "rel":"location",
- "href":"/v2/datacenters/wfad/UC1",
- "id":"uc1",
- "name":"UC1 - US West (Santa Clara)"
- }
- ]
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk:
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAntiAffinityPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'),
- exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'),
- exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- location=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_policies_for_datacenter(p)
-
- if p['state'] == "absent":
- changed, policy = self._ensure_policy_is_absent(p)
- else:
- changed, policy = self._ensure_policy_is_present(p)
-
- if hasattr(policy, 'data'):
- policy = policy.data
- elif hasattr(policy, '__dict__'):
- policy = policy.__dict__
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
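- # Credential sources, in order of precedence: API token plus account alias
- # (CLC_V2_API_TOKEN / CLC_ACCT_ALIAS), else username and password
- # (CLC_V2_API_USERNAME / CLC_V2_API_PASSWD).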
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_policies_for_datacenter(self, p):
- """
- Get the Policies for a datacenter by calling the CLC API.
- :param p: datacenter to get policies from
- :return: policies in the datacenter
- """
- response = {}
-
- policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
-
- for policy in policies:
- response[policy.name] = policy
- return response
-
- def _create_policy(self, p):
- """
- Create an Anti Affinity Policy using the CLC API.
- :param p: datacenter to create policy in
- :return: response dictionary from the CLC API.
- """
- try:
- return self.clc.v2.AntiAffinity.Create(
- name=p['name'],
- location=p['location'])
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _delete_policy(self, p):
- """
- Delete an Anti Affinity Policy using the CLC API.
- :param p: datacenter to delete a policy from
- :return: none
- """
- try:
- policy = self.policy_dict[p['name']]
- policy.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
- p['name'], ex.response_text
- ))
-
- def _policy_exists(self, policy_name):
- """
- Check to see if an Anti Affinity Policy exists
- :param policy_name: name of the policy
- :return: boolean of if the policy exists
- """
- if policy_name in self.policy_dict:
- return self.policy_dict.get(policy_name)
-
- return False
-
- def _ensure_policy_is_absent(self, p):
- """
- Makes sure that a policy is absent
- :param p: dictionary of policy name
- :return: tuple of whether a deletion occurred and None
- """
- changed = False
- if self._policy_exists(policy_name=p['name']):
- changed = True
- if not self.module.check_mode:
- self._delete_policy(p)
- return changed, None
-
- def _ensure_policy_is_present(self, p):
- """
- Ensures that a policy is present
- :param p: dictionary of a policy name
- :return: tuple of whether a creation occurred and the created policy (None in check mode)
- """
- changed = False
- policy = self._policy_exists(policy_name=p['name'])
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_policy(p)
- return changed, policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
- supports_check_mode=True)
- clc_aa_policy = ClcAntiAffinityPolicy(module)
- clc_aa_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
deleted file mode 100644
index b6ed6e96..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_alert_policy.py
+++ /dev/null
@@ -1,529 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_alert_policy
-short_description: Create or Delete Alert Policies at CenturyLink Cloud.
-description:
- - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
-options:
- alias:
- description:
- - The alias of your CLC Account
- type: str
- required: True
- name:
- description:
- - The name of the alert policy. This is mutually exclusive with I(id).
- type: str
- id:
- description:
- - The alert policy ID. This is mutually exclusive with I(name).
- type: str
- alert_recipients:
- description:
- - A list of recipient email addresses to notify for the alert.
- This is required for state 'present'.
- type: list
- elements: str
- metric:
- description:
- - The metric on which to measure the condition that will trigger the alert.
- This is required for state 'present'.
- type: str
- choices: ['cpu','memory','disk']
- duration:
- description:
- - The length of time in minutes that the condition must exceed the threshold.
- This is required for state 'present'
- type: str
- threshold:
- description:
-      - The threshold that will trigger the alert when the metric equals or exceeds it.
-        This is required for state 'present'.
-        This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
- type: int
- state:
- description:
- - Whether to create or delete the policy.
- type: str
- default: present
- choices: ['present','absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
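-
-# A minimal credentials sketch (illustrative values only, not real
-# credentials). The module reads these environment variables at run time,
-# as described in the notes above:
-#
-#   export CLC_V2_API_USERNAME=my_clc_user    # hypothetical login id
-#   export CLC_V2_API_PASSWD=my_clc_password  # hypothetical password
-#   # or token-based authentication:
-#   export CLC_V2_API_TOKEN=...               # from /v2/authentication/login
-#   export CLC_ACCT_ALIAS=WFAD                # account alias
-#   export CLC_V2_API_URL=...                 # optional alternate endpoint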
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
----
-- name: Create Alert Policy Example
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Create an Alert Policy for disk above 80% for 5 minutes
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- alert_recipients:
- - test1@centurylink.com
- - test2@centurylink.com
- metric: 'disk'
- duration: '00:05:00'
- threshold: 80
- state: present
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-
-- name: Delete Alert Policy Example
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Delete an Alert Policy
- community.general.clc_alert_policy:
- alias: wfad
- name: 'alert for disk > 80%'
- state: absent
- register: policy
-
- - name: Debug
- ansible.builtin.debug: var=policy
-'''
-
-RETURN = '''
-policy:
- description: The alert policy information
- returned: success
- type: dict
- sample:
- {
- "actions": [
- {
- "action": "email",
- "settings": {
- "recipients": [
- "user1@domain.com",
- "user1@domain.com"
- ]
- }
- }
- ],
- "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
- "links": [
- {
- "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
- "rel": "self",
- "verbs": [
- "GET",
- "DELETE",
- "PUT"
- ]
- }
- ],
- "name": "test_alert",
- "triggers": [
- {
- "duration": "00:05:00",
- "metric": "disk",
- "threshold": 80.0
- }
- ]
- }
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcAlertPolicy:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- self.policy_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- id=dict(),
- alias=dict(required=True),
- alert_recipients=dict(type='list', elements='str'),
- metric=dict(
- choices=[
- 'cpu',
- 'memory',
- 'disk'],
- default=None),
- duration=dict(type='str'),
- threshold=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- mutually_exclusive = [
- ['name', 'id']
- ]
- return {'argument_spec': argument_spec,
- 'mutually_exclusive': mutually_exclusive}
-
- # Module Behavior Goodness
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
-
- self._set_clc_credentials_from_env()
- self.policy_dict = self._get_alert_policies(p['alias'])
-
- if p['state'] == 'present':
- changed, policy = self._ensure_alert_policy_is_present()
- else:
- changed, policy = self._ensure_alert_policy_is_absent()
-
- self.module.exit_json(changed=changed, policy=policy)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_alert_policy_is_present(self):
- """
- Ensures that the alert policy is present
- :return: (changed, policy)
- changed: A flag representing if anything is modified
- policy: the created/updated alert policy
- """
- changed = False
- p = self.module.params
- policy_name = p.get('name')
-
- if not policy_name:
-            self.module.fail_json(msg='Policy name is required')
- policy = self._alert_policy_exists(policy_name)
- if not policy:
- changed = True
- policy = None
- if not self.module.check_mode:
- policy = self._create_alert_policy()
- else:
- changed_u, policy = self._ensure_alert_policy_is_updated(policy)
- if changed_u:
- changed = True
- return changed, policy
-
- def _ensure_alert_policy_is_absent(self):
- """
- Ensures that the alert policy is absent
- :return: (changed, None)
- changed: A flag representing if anything is modified
- """
- changed = False
- p = self.module.params
- alert_policy_id = p.get('id')
- alert_policy_name = p.get('name')
- alias = p.get('alias')
- if not alert_policy_id and not alert_policy_name:
- self.module.fail_json(
- msg='Either alert policy id or policy name is required')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id(
- self.module,
- alert_policy_name)
- if alert_policy_id and alert_policy_id in self.policy_dict:
- changed = True
- if not self.module.check_mode:
- self._delete_alert_policy(alias, alert_policy_id)
- return changed, None
-
- def _ensure_alert_policy_is_updated(self, alert_policy):
- """
- Ensures the alert policy is updated if anything is changed in the alert policy configuration
- :param alert_policy: the target alert policy
- :return: (changed, policy)
- changed: A flag representing if anything is modified
-            policy: the updated alert policy
- """
- changed = False
- p = self.module.params
- alert_policy_id = alert_policy.get('id')
- email_list = p.get('alert_recipients')
- metric = p.get('metric')
- duration = p.get('duration')
- threshold = p.get('threshold')
- policy = alert_policy
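-        # The CLC API reports duration as a string such as '00:05:00' and
-        # threshold as a float (see the RETURN sample above), so the
-        # comparison below coerces via str() and float() to avoid
-        # spurious type-based differences.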
- if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
- (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
- (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
- changed = True
- elif email_list:
- t_email_list = list(
- alert_policy.get('actions')[0].get('settings').get('recipients'))
- if set(email_list) != set(t_email_list):
- changed = True
- if changed and not self.module.check_mode:
- policy = self._update_alert_policy(alert_policy_id)
- return changed, policy
-
- def _get_alert_policies(self, alias):
- """
- Get the alert policies for account alias by calling the CLC API.
- :param alias: the account alias
- :return: the alert policies for the account alias
- """
- response = {}
-
- policies = self.clc.v2.API.Call('GET',
- '/v2/alertPolicies/%s'
- % alias)
-
- for policy in policies.get('items'):
- response[policy.get('id')] = policy
- return response
-
- def _create_alert_policy(self):
- """
- Create an alert Policy using the CLC API.
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'POST',
- '/v2/alertPolicies/%s' % alias,
- arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to create alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _update_alert_policy(self, alert_policy_id):
- """
- Update alert policy using the CLC API.
- :param alert_policy_id: The clc alert policy id
- :return: response dictionary from the CLC API.
- """
- p = self.module.params
- alias = p['alias']
- email_list = p['alert_recipients']
- metric = p['metric']
- duration = p['duration']
- threshold = p['threshold']
- policy_name = p['name']
- arguments = json.dumps(
- {
- 'name': policy_name,
- 'actions': [{
- 'action': 'email',
- 'settings': {
- 'recipients': email_list
- }
- }],
- 'triggers': [{
- 'metric': metric,
- 'duration': duration,
- 'threshold': threshold
- }]
- }
- )
- try:
- result = self.clc.v2.API.Call(
- 'PUT', '/v2/alertPolicies/%s/%s' %
- (alias, alert_policy_id), arguments)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to update alert policy "{0}". {1}'.format(
- policy_name, str(e.response_text)))
- return result
-
- def _delete_alert_policy(self, alias, policy_id):
- """
- Delete an alert policy using the CLC API.
- :param alias : the account alias
- :param policy_id: the alert policy id
- :return: response dictionary from the CLC API.
- """
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/alertPolicies/%s/%s' %
- (alias, policy_id), None)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg='Unable to delete alert policy id "{0}". {1}'.format(
- policy_id, str(e.response_text)))
- return result
-
- def _alert_policy_exists(self, policy_name):
- """
- Check to see if an alert policy exists
- :param policy_name: name of the alert policy
-        :return: the policy if it exists, otherwise False
- """
- result = False
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == policy_name:
- result = self.policy_dict.get(policy_id)
- return result
-
- def _get_alert_policy_id(self, module, alert_policy_name):
- """
- retrieves the alert policy id of the account based on the name of the policy
- :param module: the AnsibleModule object
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- for policy_id in self.policy_dict:
- if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy_id
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcAlertPolicy._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_alert_policy = ClcAlertPolicy(module)
- clc_alert_policy.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
deleted file mode 100644
index 9e0bfa80..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_blueprint_package.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_blueprint_package
-short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud.
-description:
-  - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
-options:
- server_ids:
- description:
-      - A list of server ids to deploy the blueprint package.
- type: list
- required: True
- elements: str
- package_id:
- description:
-      - The package id of the blueprint.
- type: str
- required: True
- package_params:
- description:
-      - The dictionary of arguments required to deploy the blueprint.
- type: dict
- default: {}
- required: False
- state:
- description:
-      - Whether to install or uninstall the package. Currently it supports only "present" for the install action.
- type: str
- required: False
- default: present
- choices: ['present']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: str
- default: 'True'
- required: False
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Deploy package
- community.general.clc_blueprint_package:
- server_ids:
- - UC1TEST-SERVER1
- - UC1TEST-SERVER2
- package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
- package_params: {}
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SERVER1",
- "UC1TEST-SERVER2"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcBlueprintPackage:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- changed = False
- changed_server_ids = []
- self._set_clc_credentials_from_env()
- server_ids = p['server_ids']
- package_id = p['package_id']
- package_params = p['package_params']
- state = p['state']
- if state == 'present':
- changed, changed_server_ids, request_list = self.ensure_package_installed(
- server_ids, package_id, package_params)
- self._wait_for_requests_to_complete(request_list)
- self.module.exit_json(changed=changed, server_ids=changed_server_ids)
-
- @staticmethod
- def define_argument_spec():
- """
-        This function defines the argument spec required by the
-        package module
-        :return: the argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', elements='str', required=True),
- package_id=dict(required=True),
- package_params=dict(type='dict', default={}),
- wait=dict(default=True), # @FIXME should be bool?
- state=dict(default='present', choices=['present'])
- )
- return argument_spec
-
- def ensure_package_installed(self, server_ids, package_id, package_params):
- """
- Ensure the package is installed in the given list of servers
- :param server_ids: the server list where the package needs to be installed
- :param package_id: the blueprint package id
- :param package_params: the package arguments
- :return: (changed, server_ids, request_list)
- changed: A flag indicating if a change was made
- server_ids: The list of servers modified
- request_list: The list of request objects from clc-sdk
- """
- changed = False
- request_list = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to get servers from CLC')
- for server in servers:
- if not self.module.check_mode:
- request = self.clc_install_package(
- server,
- package_id,
- package_params)
- request_list.append(request)
- changed = True
- return changed, server_ids, request_list
-
- def clc_install_package(self, server, package_id, package_params):
- """
- Install the package to a given clc server
- :param server: The server object where the package needs to be installed
-        :param package_id: The blueprint package id
- :param package_params: the required argument dict for the package installation
- :return: The result object from the CLC API call
- """
- result = None
- try:
- result = server.ExecutePackage(
- package_id=package_id,
- parameters=package_params)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
- package_id, server.id, ex.message
- ))
- return result
-
- def _wait_for_requests_to_complete(self, request_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param request_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in request_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process package install request')
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: the list of server ids
- :param message: the error message to raise if there is any error
-        :return: the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcBlueprintPackage.define_argument_spec(),
- supports_check_mode=True
- )
- clc_blueprint_package = ClcBlueprintPackage(module)
- clc_blueprint_package.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
deleted file mode 100644
index f1f4a2f2..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_firewall_policy.py
+++ /dev/null
@@ -1,588 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_firewall_policy
-short_description: Create/delete/update firewall policies
-description:
- - Create or delete or update firewall policies on Centurylink Cloud
-options:
- location:
- description:
- - Target datacenter for the firewall policy
- type: str
- required: True
- state:
- description:
- - Whether to create or delete the firewall policy
- type: str
- default: present
- choices: ['present', 'absent']
- source:
- description:
- - The list of source addresses for traffic on the originating firewall.
- This is required when state is 'present'
- type: list
- elements: str
- destination:
- description:
- - The list of destination addresses for traffic on the terminating firewall.
- This is required when state is 'present'
- type: list
- elements: str
- ports:
- description:
- - The list of ports associated with the policy.
- TCP and UDP can take in single ports or port ranges.
- - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
- type: list
- elements: str
- firewall_policy_id:
- description:
- - Id of the firewall policy. This is required to update or delete an existing firewall policy
- type: str
- source_account_alias:
- description:
- - CLC alias for the source account
- type: str
- required: True
- destination_account_alias:
- description:
- - CLC alias for the destination account
- type: str
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: str
- default: 'True'
- enabled:
- description:
- - Whether the firewall policy is enabled or disabled
- type: str
- choices: ['True', 'False']
- default: 'True'
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
----
-- name: Create Firewall Policy
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
-    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: present
- source: 10.128.216.0/24
- destination: 10.128.216.0/24
- ports: Any
- destination_account_alias: WFAD
-
-- name: Delete Firewall Policy
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
-    - name: Delete a Firewall Policy at CenturyLink Cloud
- clc_firewall:
- source_account_alias: WFAD
- location: VA1
- state: absent
- firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
-'''
-
-RETURN = '''
-firewall_policy_id:
-    description: The firewall policy id
- returned: success
- type: str
- sample: fc36f1bfd47242e488a9c44346438c05
-firewall_policy:
-    description: The firewall policy information
- returned: success
- type: dict
- sample:
- {
- "destination":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "destinationAccount":"wfad",
- "enabled":true,
- "id":"fc36f1bfd47242e488a9c44346438c05",
- "links":[
- {
- "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- }
- ],
- "ports":[
- "any"
- ],
- "source":[
- "10.1.1.0/24",
- "10.2.2.0/24"
- ],
- "status":"active"
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcFirewallPolicy:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.firewall_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- location=dict(required=True),
- source_account_alias=dict(required=True),
- destination_account_alias=dict(),
- firewall_policy_id=dict(),
- ports=dict(type='list', elements='str'),
- source=dict(type='list', elements='str'),
- destination=dict(type='list', elements='str'),
- wait=dict(default=True), # @FIXME type=bool
- state=dict(default='present', choices=['present', 'absent']),
- enabled=dict(default=True, choices=[True, False])
- )
- return argument_spec
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- firewall_policy = None
- location = self.module.params.get('location')
- source_account_alias = self.module.params.get('source_account_alias')
- destination_account_alias = self.module.params.get(
- 'destination_account_alias')
- firewall_policy_id = self.module.params.get('firewall_policy_id')
- ports = self.module.params.get('ports')
- source = self.module.params.get('source')
- destination = self.module.params.get('destination')
- wait = self.module.params.get('wait')
- state = self.module.params.get('state')
- enabled = self.module.params.get('enabled')
-
- self.firewall_dict = {
- 'location': location,
- 'source_account_alias': source_account_alias,
- 'destination_account_alias': destination_account_alias,
- 'firewall_policy_id': firewall_policy_id,
- 'ports': ports,
- 'source': source,
- 'destination': destination,
- 'wait': wait,
- 'state': state,
- 'enabled': enabled}
-
- self._set_clc_credentials_from_env()
-
- if state == 'absent':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
- source_account_alias, location, self.firewall_dict)
-
- elif state == 'present':
- changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
- source_account_alias, location, self.firewall_dict)
-
- return self.module.exit_json(
- changed=changed,
- firewall_policy_id=firewall_policy_id,
- firewall_policy=firewall_policy)
-
- @staticmethod
- def _get_policy_id_from_response(response):
- """
- Method to parse out the policy id from creation response
- :param response: response from firewall creation API call
- :return: policy_id: firewall policy id from creation call
- """
- url = response.get('links')[0]['href']
- path = urlparse(url).path
- path_list = os.path.split(path)
- policy_id = path_list[-1]
- return policy_id
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_firewall_policy_is_present(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: (changed, firewall_policy_id, firewall_policy)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was created/updated
- firewall_policy: The firewall_policy object
- """
- firewall_policy = None
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
-
- if firewall_policy_id is None:
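-            # No policy id was supplied, so treat this as a create request;
-            # an existing id instead routes through the compare/update path.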
- if not self.module.check_mode:
- response = self._create_firewall_policy(
- source_account_alias,
- location,
- firewall_dict)
- firewall_policy_id = self._get_policy_id_from_response(
- response)
- changed = True
- else:
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if not firewall_policy:
- return self.module.fail_json(
- msg='Unable to find the firewall policy id : {0}'.format(
- firewall_policy_id))
- changed = self._compare_get_request_with_dict(
- firewall_policy,
- firewall_dict)
- if not self.module.check_mode and changed:
- self._update_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict)
- if changed and firewall_policy_id:
- firewall_policy = self._wait_for_requests_to_complete(
- source_account_alias,
- location,
- firewall_policy_id)
- return changed, firewall_policy_id, firewall_policy
-
- def _ensure_firewall_policy_is_absent(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Ensures that a given firewall policy is removed if present
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: firewall policy to delete
- :return: (changed, firewall_policy_id, response)
- changed: flag for if a change occurred
- firewall_policy_id: the firewall policy id that was deleted
- response: response from CLC API call
- """
- changed = False
- response = []
- firewall_policy_id = firewall_dict.get('firewall_policy_id')
- result = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- if result:
- if not self.module.check_mode:
- response = self._delete_firewall_policy(
- source_account_alias,
- location,
- firewall_policy_id)
- changed = True
- return changed, firewall_policy_id, response
-
- def _create_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_dict):
- """
- Creates the firewall policy for the given account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response from CLC API call
- """
- payload = {
- 'destinationAccount': firewall_dict.get('destination_account_alias'),
- 'source': firewall_dict.get('source'),
- 'destination': firewall_dict.get('destination'),
- 'ports': firewall_dict.get('ports')}
- try:
- response = self.clc.v2.API.Call(
- 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
- (source_account_alias, location), payload)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to create firewall policy. %s" %
- str(e.response_text))
- return response
-
- def _delete_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Deletes a given firewall policy for an account alias in a datacenter
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to delete
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to delete the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _update_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- firewall_dict):
- """
- Updates a firewall policy for a given datacenter and account alias
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: firewall policy id to update
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: response: response from CLC API call
- """
- try:
- response = self.clc.v2.API.Call(
- 'PUT',
- '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias,
- location,
- firewall_policy_id),
- firewall_dict)
- except APIFailedResponse as e:
- return self.module.fail_json(
- msg="Unable to update the firewall policy id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- @staticmethod
- def _compare_get_request_with_dict(response, firewall_dict):
- """
- Helper method to compare the json response for getting the firewall policy with the request parameters
- :param response: response from the get method
- :param firewall_dict: dictionary of request parameters for firewall policy
- :return: changed: Boolean that returns true if there are differences between
- the response parameters and the playbook parameters
- """
-
- changed = False
-
- response_dest_account_alias = response.get('destinationAccount')
- response_enabled = response.get('enabled')
- response_source = response.get('source')
- response_dest = response.get('destination')
- response_ports = response.get('ports')
- request_dest_account_alias = firewall_dict.get(
- 'destination_account_alias')
- request_enabled = firewall_dict.get('enabled')
- if request_enabled is None:
- request_enabled = True
- request_source = firewall_dict.get('source')
- request_dest = firewall_dict.get('destination')
- request_ports = firewall_dict.get('ports')
-
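-        # Only values actually returned by the API are compared: each clause
-        # below is guarded by the response field being truthy, except
-        # 'enabled', which defaults to True when unset in the playbook.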
- if (
- response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
- response_enabled != request_enabled) or (
- response_source and response_source != request_source) or (
- response_dest and response_dest != request_dest) or (
- response_ports and response_ports != request_ports):
- changed = True
- return changed
-
- def _get_firewall_policy(
- self,
- source_account_alias,
- location,
- firewall_policy_id):
- """
- Get back details for a particular firewall policy
- :param source_account_alias: the source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: id of the firewall policy to get
- :return: response - The response from CLC API call
- """
- response = None
- try:
- response = self.clc.v2.API.Call(
- 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
- (source_account_alias, location, firewall_policy_id))
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- self.module.fail_json(
- msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
- firewall_policy_id, str(e.response_text)))
- return response
-
- def _wait_for_requests_to_complete(
- self,
- source_account_alias,
- location,
- firewall_policy_id,
- wait_limit=50):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param source_account_alias: The source account alias for the firewall policy
- :param location: datacenter of the firewall policy
- :param firewall_policy_id: The firewall policy id
- :param wait_limit: The number of times to check the status for completion
- :return: the firewall_policy object
- """
- wait = self.module.params.get('wait')
- count = 0
- firewall_policy = None
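-        # Poll the policy every 2 seconds until its status becomes 'active',
-        # giving up after wait_limit checks (~100 s at the default of 50).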
- while wait:
- count += 1
- firewall_policy = self._get_firewall_policy(
- source_account_alias, location, firewall_policy_id)
- status = firewall_policy.get('status')
- if status == 'active' or count > wait_limit:
- wait = False
- else:
- # wait for 2 seconds
- sleep(2)
- return firewall_policy
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_firewall = ClcFirewallPolicy(module)
- clc_firewall.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
deleted file mode 100644
index 312c6269..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_group.py
+++ /dev/null
@@ -1,514 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_group
-short_description: Create/delete Server Groups at Centurylink Cloud
-description:
-  - Create or delete Server Groups at Centurylink Cloud
-options:
- name:
- description:
- - The name of the Server Group
- type: str
- required: True
- description:
- description:
- - A description of the Server Group
- type: str
- required: False
- parent:
- description:
-      - The parent group of the server group. If parent is not provided, the group is created at the top level.
- type: str
- required: False
- location:
- description:
- - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
- associated with the account
- type: str
- required: False
- state:
- description:
- - Whether to create or delete the group
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: True
- required: False
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-
-# Create a Server Group
-
----
-- name: Create Server Group
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Create / Verify a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-# Delete a Server Group
-- name: Delete Server Group
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
- community.general.clc_group:
- name: My Cool Server Group
- parent: Default Group
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-'''
-
-RETURN = '''
-group:
- description: The group information
- returned: success
- type: dict
- sample:
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":"2015-07-29T18:52:47Z",
- "modifiedBy":"service.wfad",
- "modifiedDate":"2015-07-29T18:52:47Z"
- },
- "customFields":[
-
- ],
- "description":"test group",
- "groups":[
-
- ],
- "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "links":[
- {
- "href":"/v2/groups/wfad",
- "rel":"createGroup",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad",
- "rel":"createServer",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"parentGroup"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
- "rel":"defaults",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
- "rel":"billing"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
- "rel":"archiveGroupAction"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
- "rel":"horizontalAutoscalePolicyMapping",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test group",
- "status":"active",
- "type":"default"
- }
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcGroup(object):
-
- clc = None
- root_group = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- location = self.module.params.get('location')
- group_name = self.module.params.get('name')
- parent_name = self.module.params.get('parent')
- group_description = self.module.params.get('description')
- state = self.module.params.get('state')
-
- self._set_clc_credentials_from_env()
- self.group_dict = self._get_group_tree_for_datacenter(
- datacenter=location)
-
- if state == "absent":
- changed, group, requests = self._ensure_group_is_absent(
- group_name=group_name, parent_name=parent_name)
- if requests:
- self._wait_for_requests_to_complete(requests)
- else:
- changed, group = self._ensure_group_is_present(
- group_name=group_name, parent_name=parent_name, group_description=group_description)
- try:
- group = group.data
- except AttributeError:
- group = group_name
- self.module.exit_json(changed=changed, group=group)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(default=None),
- parent=dict(default=None),
- location=dict(default=None),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=True))
-
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _ensure_group_is_absent(self, group_name, parent_name):
- """
- Ensure that group_name is absent by deleting it if necessary
- :param group_name: string - the name of the clc server group to delete
- :param parent_name: string - the name of the parent group for group_name
- :return: changed, group
- """
- changed = False
- group = []
- results = []
-
- if self._group_exists(group_name=group_name, parent_name=parent_name):
- if not self.module.check_mode:
- group.append(group_name)
- result = self._delete_group(group_name)
- results.append(result)
- changed = True
- return changed, group, results
-
- def _delete_group(self, group_name):
- """
- Delete the provided server group
- :param group_name: string - the server group to delete
- :return: none
- """
- response = None
- group, parent = self.group_dict.get(group_name)
- try:
- response = group.Delete()
- except CLCException as ex:
-            self.module.fail_json(msg='Failed to delete group: {0}. {1}'.format(
- group_name, ex.response_text
- ))
- return response
-
- def _ensure_group_is_present(
- self,
- group_name,
- parent_name,
- group_description):
- """
- Checks to see if a server group exists, creates it if it doesn't.
- :param group_name: the name of the group to validate/create
- :param parent_name: the name of the parent group for group_name
- :param group_description: a short description of the server group (used when creating)
- :return: (changed, group) -
- changed: Boolean- whether a change was made,
- group: A clc group object for the group
- """
- if not self.root_group:
- raise AssertionError("Implementation Error: Root Group not set")
- parent = parent_name if parent_name is not None else self.root_group.name
- description = group_description
- changed = False
- group = group_name
-
- parent_exists = self._group_exists(group_name=parent, parent_name=None)
- child_exists = self._group_exists(
- group_name=group_name,
- parent_name=parent)
-
- if parent_exists and child_exists:
- group, parent = self.group_dict[group_name]
- changed = False
- elif parent_exists and not child_exists:
- if not self.module.check_mode:
- group = self._create_group(
- group=group,
- parent=parent,
- description=description)
- changed = True
- else:
- self.module.fail_json(
- msg="parent group: " +
- parent +
- " does not exist")
-
- return changed, group
-
- def _create_group(self, group, parent, description):
- """
- Create the provided server group
- :param group: clc_sdk.Group - the group to create
- :param parent: clc_sdk.Parent - the parent group for {group}
- :param description: string - a text description of the group
- :return: clc_sdk.Group - the created group
- """
- response = None
- (parent, grandparent) = self.group_dict[parent]
- try:
- response = parent.Create(name=group, description=description)
- except CLCException as ex:
-            self.module.fail_json(msg='Failed to create group: {0}. {1}'.format(
- group, ex.response_text))
- return response
-
- def _group_exists(self, group_name, parent_name):
- """
- Check to see if a group exists
- :param group_name: string - the group to check
- :param parent_name: string - the parent of group_name
- :return: boolean - whether the group exists
- """
- result = False
- if group_name in self.group_dict:
- (group, parent) = self.group_dict[group_name]
- if parent_name is None or parent_name == parent.name:
- result = True
- return result
-
- def _get_group_tree_for_datacenter(self, datacenter=None):
- """
- Walk the tree of groups for a datacenter
- :param datacenter: string - the datacenter to walk (ex: 'UC1')
- :return: a dictionary of groups and parents
- """
- self.root_group = self.clc.v2.Datacenter(
- location=datacenter).RootGroup()
- return self._walk_groups_recursive(
- parent_group=None,
- child_group=self.root_group)
-
- def _walk_groups_recursive(self, parent_group, child_group):
- """
- Walk a parent-child tree of groups, starting with the provided child group
- :param parent_group: clc_sdk.Group - the parent group to start the walk
- :param child_group: clc_sdk.Group - the child group to start the walk
- :return: a dictionary of groups and parents
- """
- result = {str(child_group): (child_group, parent_group)}
- groups = child_group.Subgroups().groups
- if len(groups) > 0:
- for group in groups:
- if group.type != 'default':
- continue
-
- result.update(self._walk_groups_recursive(child_group, group))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process group request')
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcGroup._define_module_argument_spec(),
- supports_check_mode=True)
-
- clc_group = ClcGroup(module)
- clc_group.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
deleted file mode 100644
index 656f4363..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_loadbalancer.py
+++ /dev/null
@@ -1,937 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_loadbalancer
-short_description: Create or Delete shared load balancers in CenturyLink Cloud.
-description:
-  - An Ansible module to Create or Delete shared load balancers in CenturyLink Cloud.
-options:
- name:
- description:
- - The name of the loadbalancer
- type: str
- required: True
- description:
- description:
- - A description for the loadbalancer
- type: str
- alias:
- description:
- - The alias of your CLC Account
- type: str
- required: True
- location:
- description:
-      - The location of the datacenter where the load balancer resides
- type: str
- required: True
- method:
- description:
-      - The balancing method for the load balancer pool
- type: str
- choices: ['leastConnection', 'roundRobin']
- persistence:
- description:
- - The persistence method for the load balancer
- type: str
- choices: ['standard', 'sticky']
- port:
- description:
- - Port to configure on the public-facing side of the load balancer pool
- type: str
- choices: ['80', '443']
- nodes:
- description:
-      - A list of nodes that need to be added to the load balancer pool
- type: list
- default: []
- elements: dict
- status:
- description:
- - The status of the loadbalancer
- type: str
- default: enabled
- choices: ['enabled', 'disabled']
- state:
- description:
- - Whether to create or delete the load balancer pool
- type: str
- default: present
- choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables, which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
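-
-# The notes above resolve credentials in a fixed order: an API token plus an
-# account alias take precedence, and username/password is the fallback. A
-# minimal sketch of that order (hypothetical helper, shown for illustration
-# only; the real logic lives in _set_clc_credentials_from_env below):
-def _resolve_clc_credentials_sketch(env):
-    if env.get('CLC_V2_API_TOKEN') and env.get('CLC_ACCT_ALIAS'):
-        return 'token', (env['CLC_V2_API_TOKEN'], env['CLC_ACCT_ALIAS'])
-    if env.get('CLC_V2_API_USERNAME') and env.get('CLC_V2_API_PASSWD'):
-        return 'password', (env['CLC_V2_API_USERNAME'], env['CLC_V2_API_PASSWD'])
-    raise RuntimeError('You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables')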
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-- name: Create Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: present
-
-- name: Add node to an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Create things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_present
-
-- name: Remove node from an existing loadbalancer pool
- hosts: localhost
- connection: local
- tasks:
-    - name: Actually Remove things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.234
- privatePort: 80
- state: nodes_absent
-
-- name: Delete LoadbalancerPool
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: port_absent
-
-- name: Delete Loadbalancer
- hosts: localhost
- connection: local
- tasks:
- - name: Actually Delete things
- community.general.clc_loadbalancer:
- name: test
- description: test
- alias: TEST
- location: WA1
- port: 443
- nodes:
- - ipAddress: 10.11.22.123
- privatePort: 80
- state: absent
-'''
-
-RETURN = '''
-loadbalancer:
- description: The load balancer result object from CLC
- returned: success
- type: dict
- sample:
- {
- "description":"test-lb",
- "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
- "ipAddress":"66.150.174.197",
- "links":[
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
- "rel":"self",
- "verbs":[
- "GET",
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
- "rel":"pools",
- "verbs":[
- "GET",
- "POST"
- ]
- }
- ],
- "name":"test-lb",
- "pools":[
-
- ],
- "status":"enabled"
- }
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-from time import sleep
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcLoadBalancer:
-
- clc = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.lb_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Execute the main code path, and handle the request
- :return: none
- """
- changed = False
- result_lb = None
- loadbalancer_name = self.module.params.get('name')
- loadbalancer_alias = self.module.params.get('alias')
- loadbalancer_location = self.module.params.get('location')
- loadbalancer_description = self.module.params.get('description')
- loadbalancer_port = self.module.params.get('port')
- loadbalancer_method = self.module.params.get('method')
- loadbalancer_persistence = self.module.params.get('persistence')
- loadbalancer_nodes = self.module.params.get('nodes')
- loadbalancer_status = self.module.params.get('status')
- state = self.module.params.get('state')
-
- if loadbalancer_description is None:
- loadbalancer_description = loadbalancer_name
-
- self._set_clc_credentials_from_env()
-
- self.lb_dict = self._get_loadbalancer_list(
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- if state == 'present':
- changed, result_lb, lb_id = self.ensure_loadbalancer_present(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- description=loadbalancer_description,
- status=loadbalancer_status)
- if loadbalancer_port:
- changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
- lb_id=lb_id,
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- method=loadbalancer_method,
- persistence=loadbalancer_persistence,
- port=loadbalancer_port)
-
- if loadbalancer_nodes:
- changed, result_nodes = self.ensure_lbpool_nodes_set(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
- elif state == 'absent':
- changed, result_lb = self.ensure_loadbalancer_absent(
- name=loadbalancer_name,
- alias=loadbalancer_alias,
- location=loadbalancer_location)
-
- elif state == 'port_absent':
- changed, result_lb = self.ensure_loadbalancerpool_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port)
-
- elif state == 'nodes_present':
- changed, result_lb = self.ensure_lbpool_nodes_present(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- elif state == 'nodes_absent':
- changed, result_lb = self.ensure_lbpool_nodes_absent(
- alias=loadbalancer_alias,
- location=loadbalancer_location,
- name=loadbalancer_name,
- port=loadbalancer_port,
- nodes=loadbalancer_nodes)
-
- self.module.exit_json(changed=changed, loadbalancer=result_lb)
-
- def ensure_loadbalancer_present(
- self, name, alias, location, description, status):
- """
- Checks to see if a load balancer exists and creates one if it does not.
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description of loadbalancer
- :param status: Enabled / Disabled
- :return: (changed, result, lb_id)
- changed: Boolean whether a change was made
- result: The result object from the CLC load balancer request
- lb_id: The load balancer id
- """
- changed = False
- result = name
- lb_id = self._loadbalancer_exists(name=name)
- if not lb_id:
- if not self.module.check_mode:
- result = self.create_loadbalancer(name=name,
- alias=alias,
- location=location,
- description=description,
- status=status)
- lb_id = result.get('id')
- changed = True
-
- return changed, result, lb_id
-
- def ensure_loadbalancerpool_present(
- self, lb_id, alias, location, method, persistence, port):
- """
- Checks to see if a load balancer pool exists and creates one if it does not.
- :param lb_id: The loadbalancer id
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
-        :return: (changed, result, pool_id) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- pool_id: The string id of the load balancer pool
- """
- changed = False
- result = port
- if not lb_id:
- return changed, None, None
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if not pool_id:
- if not self.module.check_mode:
- result = self.create_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- method=method,
- persistence=persistence,
- port=port)
- pool_id = result.get('id')
- changed = True
-
- return changed, result, pool_id
-
- def ensure_loadbalancer_absent(self, name, alias, location):
- """
- Checks to see if a load balancer exists and deletes it if it does
- :param name: Name of the load balancer
- :param alias: Alias of account
- :param location: Datacenter
- :return: (changed, result)
- changed: Boolean whether a change was made
- result: The result from the CLC API Call
- """
- changed = False
- result = name
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- if not self.module.check_mode:
- result = self.delete_loadbalancer(alias=alias,
- location=location,
- name=name)
- changed = True
- return changed, result
-
- def ensure_loadbalancerpool_absent(self, alias, location, name, port):
- """
- Checks to see if a load balancer pool exists and deletes it if it does
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer listens on
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = None
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed = True
- if not self.module.check_mode:
- result = self.delete_loadbalancerpool(
- alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id)
- else:
- result = "Pool doesn't exist"
- else:
- result = "LB Doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
- """
-        Checks whether the provided list of nodes exists for the pool
-        and sets the node list if any of them are missing
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: The list of nodes to be updated to the pool
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- result = {}
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_check=nodes)
- if not nodes_exist:
- changed = True
- result = self.set_loadbalancernodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
- """
-        Checks to see if the provided list of nodes exists for the pool and adds the missing nodes to the pool
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be added
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.add_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_add=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
- """
-        Checks to see if the provided list of nodes exists for the pool and removes any that are found
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param name: the name of the load balancer
- :param port: the port that the load balancer will listen on
- :param nodes: the list of nodes to be removed
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- lb_exists = self._loadbalancer_exists(name=name)
- if lb_exists:
- lb_id = self._get_loadbalancer_id(name=name)
- pool_id = self._loadbalancerpool_exists(
- alias=alias,
- location=location,
- port=port,
- lb_id=lb_id)
- if pool_id:
- changed, result = self.remove_lbpool_nodes(alias=alias,
- location=location,
- lb_id=lb_id,
- pool_id=pool_id,
- nodes_to_remove=nodes)
- else:
- result = "Pool doesn't exist"
- else:
- result = "Load balancer doesn't Exist"
- return changed, result
-
- def create_loadbalancer(self, name, alias, location, description, status):
- """
-        Create a loadbalancer with the provided parameters
- :param name: Name of loadbalancer
- :param alias: Alias of account
- :param location: Datacenter
- :param description: Description for loadbalancer to be created
- :param status: Enabled / Disabled
- :return: result: The result from the CLC API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call('POST',
- '/v2/sharedLoadBalancers/%s/%s' % (alias,
- location),
- json.dumps({"name": name,
- "description": description,
- "status": status}))
- sleep(1)
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def create_loadbalancerpool(
- self, alias, location, lb_id, method, persistence, port):
- """
- Creates a pool on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param method: the load balancing method
- :param persistence: the load balancing persistence type
- :param port: the port that the load balancer will listen on
- :return: result: The result from the create API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id), json.dumps(
- {
- "port": port, "method": method, "persistence": persistence
- }))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to create pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def delete_loadbalancer(self, alias, location, name):
- """
- Delete CLC loadbalancer
- :param alias: Alias for account
- :param location: Datacenter
- :param name: Name of the loadbalancer to delete
- :return: result: The result from the CLC API call
- """
- result = None
- lb_id = self._get_loadbalancer_id(name=name)
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete load balancer "{0}". {1}'.format(
- name, str(e.response_text)))
- return result
-
- def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
- """
- Delete the pool on the provided load balancer
- :param alias: The account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the load balancer pool
- :return: result: The result from the delete API call
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
- (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
- lb_id, str(e.response_text)))
- return result
-
- def _get_loadbalancer_id(self, name):
- """
- Retrieves unique ID of loadbalancer
- :param name: Name of loadbalancer
- :return: Unique ID of the loadbalancer
- """
-        lb_id = None
-        for lb in self.lb_dict:
-            if lb.get('name') == name:
-                lb_id = lb.get('id')
-        return lb_id
-
- def _get_loadbalancer_list(self, alias, location):
- """
- Retrieve a list of loadbalancers
- :param alias: Alias for account
- :param location: Datacenter
- :return: JSON data for all loadbalancers at datacenter
- """
- result = None
- try:
- result = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch load balancers for account: {0}. {1}'.format(
- alias, str(e.response_text)))
- return result
-
- def _loadbalancer_exists(self, name):
- """
- Verify a loadbalancer exists
- :param name: Name of loadbalancer
- :return: False or the ID of the existing loadbalancer
- """
- result = False
-
- for lb in self.lb_dict:
- if lb.get('name') == name:
- result = lb.get('id')
- return result
-
- def _loadbalancerpool_exists(self, alias, location, port, lb_id):
- """
- Checks to see if a pool exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param port: the port to check and see if it exists
- :param lb_id: the id string of the provided load balancer
- :return: result: The id string of the pool or False
- """
- result = False
- try:
- pool_list = self.clc.v2.API.Call(
- 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
- (alias, location, lb_id))
- except APIFailedResponse as e:
- return self.module.fail_json(
-                msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
- lb_id, str(e.response_text)))
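-        # Pools are matched by their listen port; both sides are cast to int
-        # because the configured port may arrive as a string.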
- for pool in pool_list:
- if int(pool.get('port')) == int(port):
- result = pool.get('id')
- return result
-
- def _loadbalancerpool_nodes_exists(
- self, alias, location, lb_id, pool_id, nodes_to_check):
- """
- Checks to see if a set of nodes exists on the specified port on the provided load balancer
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the provided load balancer
- :param pool_id: the id string of the load balancer pool
- :param nodes_to_check: the list of nodes to check for
- :return: result: True / False indicating if the given nodes exist
- """
-        nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
-        for node in nodes_to_check:
-            if not node.get('status'):
-                node['status'] = 'enabled'
-            # Every requested node (with status defaulted) must already be
-            # present in the pool.
-            if node not in nodes:
-                return False
-        return True
-
- def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
- """
- Updates nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes: a list of dictionaries containing the nodes to set
- :return: result: The result from the CLC API call
- """
- result = None
- if not lb_id:
- return result
- if not self.module.check_mode:
- try:
- result = self.clc.v2.API.Call('PUT',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id), json.dumps(nodes))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
- """
- Add nodes to the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_add: a list of dictionaries containing the nodes to add
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_add:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node not in nodes:
- changed = True
- nodes.append(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
-
- def remove_lbpool_nodes(
- self, alias, location, lb_id, pool_id, nodes_to_remove):
- """
- Removes nodes from the provided pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :param nodes_to_remove: a list of dictionaries containing the nodes to remove
- :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- changed = False
- result = {}
- nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
- for node in nodes_to_remove:
- if not node.get('status'):
- node['status'] = 'enabled'
- if node in nodes:
- changed = True
- nodes.remove(node)
- if changed is True and not self.module.check_mode:
- result = self.set_loadbalancernodes(
- alias,
- location,
- lb_id,
- pool_id,
- nodes)
- return changed, result
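-
-    # The nodes_present / nodes_absent / set paths above all share one
-    # reconcile pattern: default each desired node's status to 'enabled',
-    # adjust membership in the current list, then PUT the whole list back via
-    # set_loadbalancernodes. A compact, illustrative restatement (hypothetical
-    # helper, never called by the module):
-    @staticmethod
-    def _reconcile_nodes_sketch(current, desired, remove=False):
-        for node in desired:
-            if not node.get('status'):
-                node['status'] = 'enabled'
-            if remove and node in current:
-                current.remove(node)
-            elif not remove and node not in current:
-                current.append(node)
-        return current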
-
- def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
- """
- Return the list of nodes available to the provided load balancer pool
- :param alias: the account alias
- :param location: the datacenter the load balancer resides in
- :param lb_id: the id string of the load balancer
- :param pool_id: the id string of the pool
- :return: result: The list of nodes
- """
- result = None
- try:
- result = self.clc.v2.API.Call('GET',
- '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
- % (alias, location, lb_id, pool_id))
- except APIFailedResponse as e:
- self.module.fail_json(
- msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
- pool_id, str(e.response_text)))
- return result
-
- @staticmethod
- def define_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(required=True),
- description=dict(default=None),
- location=dict(required=True),
- alias=dict(required=True),
- port=dict(choices=[80, 443]),
- method=dict(choices=['leastConnection', 'roundRobin']),
- persistence=dict(choices=['standard', 'sticky']),
- nodes=dict(type='list', default=[], elements='dict'),
- status=dict(default='enabled', choices=['enabled', 'disabled']),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'port_absent',
- 'nodes_present',
- 'nodes_absent'])
- )
- return argument_spec
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
- supports_check_mode=True)
- clc_loadbalancer = ClcLoadBalancer(module)
- clc_loadbalancer.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
deleted file mode 100644
index 27cdf614..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_modify_server.py
+++ /dev/null
@@ -1,967 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_modify_server
-short_description: Modify servers in CenturyLink Cloud.
-description:
- - An Ansible module to modify servers in CenturyLink Cloud.
-options:
- server_ids:
- description:
- - A list of server Ids to modify.
- type: list
- required: True
- elements: str
- cpu:
- description:
- - How many CPUs to update on the server
- type: str
- memory:
- description:
- - Memory (in GB) to set to the server.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti affinity policy id to be set for a hyper scale server.
- This is mutually exclusive with 'anti_affinity_policy_name'
- type: str
- anti_affinity_policy_name:
- description:
- - The anti affinity policy name to be set for a hyper scale server.
- This is mutually exclusive with 'anti_affinity_policy_id'
- type: str
- alert_policy_id:
- description:
- - The alert policy id to be associated to the server.
- This is mutually exclusive with 'alert_policy_name'
- type: str
- alert_policy_name:
- description:
- - The alert policy name to be associated to the server.
- This is mutually exclusive with 'alert_policy_id'
- type: str
- state:
- description:
-      - The state to ensure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: 'yes'
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables, which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
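-
-# Under the hood, the cpu and memory changes documented above are sent as a
-# single PATCH of "set" operations (see _modify_clc_server below); a sketch of
-# that request body, with illustrative values, before json.dumps is applied:
-_EXAMPLE_MODIFY_BODY = [
-    {"op": "set", "member": "memory", "value": 8},
-    {"op": "set", "member": "cpu", "value": 4},
-]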
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Set the cpu count to 4 on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 4
- state: present
-
-- name: Set the memory to 8GB on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- memory: 8
- state: present
-
-- name: Set the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: present
-
-- name: Remove the anti affinity policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- anti_affinity_policy_name: 'aa_policy'
- state: absent
-
-- name: Add the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: present
-
-- name: Remove the alert policy on a server
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- alert_policy_name: 'alert_policy'
- state: absent
-
-- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
- community.general.clc_modify_server:
- server_ids:
- - UC1TESTSVR01
- - UC1TESTSVR02
- cpu: 8
- memory: 16
- state: present
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-servers:
- description: The list of server objects that are changed
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcModifyServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
-
- p = self.module.params
- cpu = p.get('cpu')
- memory = p.get('memory')
- state = p.get('state')
- if state == 'absent' and (cpu or memory):
- return self.module.fail_json(
- msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
-
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to modify: %s' %
- server_ids)
-
- (changed, server_dict_array, changed_server_ids) = self._modify_servers(
- server_ids=server_ids)
-
- self.module.exit_json(
- changed=changed,
- server_ids=changed_server_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- state=dict(default='present', choices=['present', 'absent']),
- cpu=dict(),
- memory=dict(),
- anti_affinity_policy_id=dict(),
- anti_affinity_policy_name=dict(),
- alert_policy_id=dict(),
- alert_policy_name=dict(),
- wait=dict(type='bool', default=True)
- )
- mutually_exclusive = [
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name']
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: the error message to throw in case of any error
- :return the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex.message)
-
- def _modify_servers(self, server_ids):
- """
-        Modify the configuration of the servers in the provided list
- :param server_ids: list of servers to modify
- :return: a list of dictionaries with server information about the servers that were modified
- """
- p = self.module.params
- state = p.get('state')
- server_params = {
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
- 'alert_policy_id': p.get('alert_policy_id'),
- 'alert_policy_name': p.get('alert_policy_name'),
- }
- changed = False
- server_changed = False
- aa_changed = False
- ap_changed = False
- server_dict_array = []
- result_server_ids = []
- request_list = []
- changed_servers = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return self.module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- for server in servers:
- if state == 'present':
- server_changed, server_result = self._ensure_server_config(
- server, server_params)
- if server_result:
- request_list.append(server_result)
- aa_changed = self._ensure_aa_policy_present(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_present(
- server,
- server_params)
- elif state == 'absent':
- aa_changed = self._ensure_aa_policy_absent(
- server,
- server_params)
- ap_changed = self._ensure_alert_policy_absent(
- server,
- server_params)
- if server_changed or aa_changed or ap_changed:
- changed_servers.append(server)
- changed = True
-
- self._wait_for_requests(self.module, request_list)
- self._refresh_servers(self.module, changed_servers)
-
- for server in changed_servers:
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- def _ensure_server_config(
- self, server, server_params):
- """
- ensures the server is updated with the provided cpu and memory
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
-        :return: (changed, result) -
- changed: Boolean whether a change was made
- result: The result from the CLC API call
- """
- cpu = server_params.get('cpu')
- memory = server_params.get('memory')
- changed = False
- result = None
-
- if not cpu:
- cpu = server.cpu
- if not memory:
- memory = server.memory
- if memory != server.memory or cpu != server.cpu:
- if not self.module.check_mode:
- result = self._modify_clc_server(
- self.clc,
- self.module,
- server.id,
- cpu,
- memory)
- changed = True
- return changed, result
-
- @staticmethod
- def _modify_clc_server(clc, module, server_id, cpu, memory):
- """
- Modify the memory or CPU of a clc server.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param server_id: id of the server to modify
- :param cpu: the new cpu value
- :param memory: the new memory value
- :return: the result of CLC API call
- """
- result = None
- acct_alias = clc.v2.Account.GetAlias()
- try:
- # Update the server configuration
- job_obj = clc.v2.API.Call('PATCH',
- 'servers/%s/%s' % (acct_alias,
- server_id),
- json.dumps([{"op": "set",
- "member": "memory",
- "value": memory},
- {"op": "set",
- "member": "cpu",
- "value": cpu}]))
- result = clc.v2.Requests(job_obj)
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to update the server configuration for server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process modify server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- def _ensure_aa_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided anti affinity policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
-        :return: changed: Boolean whether a change was made
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
-
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id != current_aa_policy_id:
- self._modify_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- aa_policy_id)
- changed = True
- return changed
-
- def _ensure_aa_policy_absent(
- self, server, server_params):
- """
- ensures the provided anti affinity policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
-        :return: changed: Boolean whether a change was made
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- aa_policy_id = server_params.get('anti_affinity_policy_id')
- aa_policy_name = server_params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- aa_policy_id = self._get_aa_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- aa_policy_name)
- current_aa_policy_id = self._get_aa_policy_id_of_server(
- self.clc,
- self.module,
- acct_alias,
- server.id)
-
- if aa_policy_id and aa_policy_id == current_aa_policy_id:
- self._delete_aa_policy(
- self.clc,
- self.module,
- acct_alias,
- server.id)
- changed = True
- return changed
-
- @staticmethod
- def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
- """
- modifies the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param aa_policy_id: the anti affinity policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('PUT',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({"id": aa_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _delete_aa_policy(clc, module, acct_alias, server_id):
- """
- Delete the anti affinity policy of the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/antiAffinityPolicy' % (
- acct_alias,
- server_id),
- json.dumps({}))
- except APIFailedResponse as ex:
- module.fail_json(
- msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
- """
- retrieves the anti affinity policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(
- msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _get_aa_policy_id_of_server(clc, module, alias, server_id):
- """
- retrieves the anti affinity policy id of the server based on the CLC server id
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param server_id: the CLC server id
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- result = clc.v2.API.Call(
- method='GET', url='servers/%s/%s/antiAffinityPolicy' %
- (alias, server_id))
- aa_policy_id = result.get('id')
- except APIFailedResponse as ex:
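-            # A 404 here simply means no anti affinity policy is attached;
-            # any other failure is fatal.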
- if ex.response_status_code != 404:
- module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return aa_policy_id
-
- def _ensure_alert_policy_present(
- self, server, server_params):
- """
- ensures the server is updated with the provided alert policy
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
-        :return: changed: Boolean whether a change was made
- """
- changed = False
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
- if alert_policy_id and not self._alert_policy_exists(
- server, alert_policy_id):
- self._add_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- def _ensure_alert_policy_absent(
- self, server, server_params):
- """
- ensures the alert policy is removed from the server
- :param server: the CLC server object
- :param server_params: the dictionary of server parameters
-        :return: changed: Boolean whether a change was made
- """
- changed = False
-
- acct_alias = self.clc.v2.Account.GetAlias()
- alert_policy_id = server_params.get('alert_policy_id')
- alert_policy_name = server_params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alert_policy_id = self._get_alert_policy_id_by_name(
- self.clc,
- self.module,
- acct_alias,
- alert_policy_name)
-
- if alert_policy_id and self._alert_policy_exists(
- server, alert_policy_id):
- self._remove_alert_policy_to_server(
- self.clc,
- self.module,
- acct_alias,
- server.id,
- alert_policy_id)
- changed = True
- return changed
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
-        add the alert policy to the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('POST',
- 'servers/%s/%s/alertPolicies' % (
- acct_alias,
- server_id),
- json.dumps({"id": alert_policy_id}))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _remove_alert_policy_to_server(
- clc, module, acct_alias, server_id, alert_policy_id):
- """
-        remove the alert policy from the CLC server
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param acct_alias: the CLC account alias
- :param server_id: the CLC server id
- :param alert_policy_id: the alert policy id
- :return: result: The result from the CLC API call
- """
- result = None
- if not module.check_mode:
- try:
- result = clc.v2.API.Call('DELETE',
- 'servers/%s/%s/alertPolicies/%s'
- % (acct_alias, server_id, alert_policy_id))
- except APIFailedResponse as ex:
- module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
- server_id, str(ex.response_text)))
- return result
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- retrieves the alert policy id of the server based on the name of the policy
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param alert_policy_name: the alert policy name
- :return: alert_policy_id: The alert policy id
- """
- alert_policy_id = None
- try:
- alert_policies = clc.v2.API.Call(method='GET',
- url='alertPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
- alias, str(ex.response_text)))
- for alert_policy in alert_policies.get('items'):
- if alert_policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = alert_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _alert_policy_exists(server, alert_policy_id):
- """
- Checks if the alert policy exists for the server
- :param server: the clc server object
- :param alert_policy_id: the alert policy
-        :return: True if the given alert policy id is associated to the server, False otherwise
- """
- result = False
- alert_policies = server.alertPolicies
- if alert_policies:
- for alert_policy in alert_policies:
- if alert_policy.get('id') == alert_policy_id:
- result = True
- return result
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
-
- argument_dict = ClcModifyServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_modify_server = ClcModifyServer(module)
- clc_modify_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
deleted file mode 100644
index 3b4fcc4e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_publicip.py
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_publicip
-short_description: Add and Delete public ips on servers in CenturyLink Cloud.
-description:
- - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
-options:
- protocol:
- description:
- - The protocol that the public IP will listen for.
- type: str
- default: TCP
- choices: ['TCP', 'UDP', 'ICMP']
- ports:
- description:
- - A list of ports to expose. This is required when state is 'present'
- type: list
- elements: int
- server_ids:
- description:
- - A list of servers to create public ips on.
- type: list
- required: True
- elements: str
- state:
- description:
-      - Determine whether to create or delete public IPs. If present, the module will not create a second public ip if one
- already exists.
- type: str
- default: present
- choices: ['present', 'absent']
- wait:
- description:
- - Whether to wait for the tasks to finish before returning.
- type: bool
- default: 'yes'
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
-  - To use this module, it is required to set the below environment variables, which enable access to the
-    Centurylink Cloud
-  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
-  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Add Public IP to Server
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
- - name: Create Public IP For Servers
- community.general.clc_publicip:
- protocol: TCP
- ports:
- - 80
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: present
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-
-- name: Delete Public IP from Server
- hosts: localhost
- gather_facts: False
- connection: local
- tasks:
-    - name: Delete Public IP For Servers
- community.general.clc_publicip:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- state: absent
- register: clc
-
- - name: Debug
- ansible.builtin.debug:
- var: clc
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcPublicIp(object):
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- self._set_clc_credentials_from_env()
- params = self.module.params
- server_ids = params['server_ids']
- ports = params['ports']
- protocol = params['protocol']
- state = params['state']
-
- if state == 'present':
- changed, changed_server_ids, requests = self.ensure_public_ip_present(
- server_ids=server_ids, protocol=protocol, ports=ports)
- elif state == 'absent':
- changed, changed_server_ids, requests = self.ensure_public_ip_absent(
- server_ids=server_ids)
- else:
- return self.module.fail_json(msg="Unknown State: " + state)
- self._wait_for_requests_to_complete(requests)
- return self.module.exit_json(changed=changed,
- server_ids=changed_server_ids)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
- ports=dict(type='list', elements='int'),
- wait=dict(type='bool', default=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- return argument_spec
-
- def ensure_public_ip_present(self, server_ids, protocol, ports):
- """
- Ensures the given server ids have a public ip available
- :param server_ids: the list of server ids
- :param protocol: the ip protocol
- :param ports: the list of ports to expose
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) == 0]
- ports_to_expose = [{'protocol': protocol, 'port': port}
- for port in ports]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._add_publicip_to_server(server, ports_to_expose)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
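-
- # Illustrative sketch (not part of the original module): for protocol='TCP'
- # and ports=[80, 443], the ports_to_expose payload built above would be
- # [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}].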
-
- def _add_publicip_to_server(self, server, ports_to_expose):
- result = None
- try:
- result = server.PublicIPs().Add(ports_to_expose)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_public_ip_absent(self, server_ids):
- """
- Ensures the given server ids have their public ips removed if there are any
- :param server_ids: the list of server ids
- :return: (changed, changed_server_ids, results)
- changed: A flag indicating if there is any change
- changed_server_ids : the list of server ids that are changed
- results: The result list from clc public ip call
- """
- changed = False
- results = []
- changed_server_ids = []
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.PublicIPs().public_ips) > 0]
- for server in servers_to_change:
- if not self.module.check_mode:
- result = self._remove_publicip_from_server(server)
- results.append(result)
- changed_server_ids.append(server.id)
- changed = True
- return changed, changed_server_ids, results
-
- def _remove_publicip_from_server(self, server):
- result = None
- try:
- for ip_address in server.PublicIPs().public_ips:
- result = ip_address.Delete()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process public ip request')
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- def _get_servers_from_clc(self, server_ids, message):
- """
- Gets the list of servers from the CLC API
- """
- try:
- return self.clc.v2.Servers(server_ids).servers
- except CLCException as exception:
- self.module.fail_json(msg=message + ': %s' % exception)
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- module = AnsibleModule(
- argument_spec=ClcPublicIp._define_module_argument_spec(),
- supports_check_mode=True
- )
- clc_public_ip = ClcPublicIp(module)
- clc_public_ip.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
deleted file mode 100644
index 73403987..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server.py
+++ /dev/null
@@ -1,1563 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_server
-short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
-description:
- - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
-options:
- additional_disks:
- description:
- - The list of additional disks for the server
- type: list
- elements: dict
- default: []
- add_public_ip:
- description:
- - Whether to add a public ip to the server
- type: bool
- default: 'no'
- alias:
- description:
- - The account alias to provision the servers under.
- type: str
- anti_affinity_policy_id:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
- type: str
- anti_affinity_policy_name:
- description:
- - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
- type: str
- alert_policy_id:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
- type: str
- alert_policy_name:
- description:
- - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
- type: str
- count:
- description:
- - The number of servers to build (mutually exclusive with exact_count)
- default: 1
- type: int
- count_group:
- description:
- - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
- type: str
- cpu:
- description:
- - How many CPUs to provision on the server
- default: 1
- type: int
- cpu_autoscale_policy_id:
- description:
- - The autoscale policy to assign to the server.
- type: str
- custom_fields:
- description:
- - The list of custom fields to set on the server.
- type: list
- default: []
- elements: dict
- description:
- description:
- - The description to set for the server.
- type: str
- exact_count:
- description:
- - Run in idempotent mode. Will ensure that exactly this number of servers is running in the provided group,
- creating and deleting them to reach that count. Requires count_group to be set.
- type: int
- group:
- description:
- - The Server Group to create servers under.
- type: str
- default: 'Default Group'
- ip_address:
- description:
- - The IP Address for the server. One is assigned if not provided.
- type: str
- location:
- description:
- - The Datacenter to create servers in.
- type: str
- managed_os:
- description:
- - Whether to create the server as 'Managed' or not.
- type: bool
- default: 'no'
- required: False
- memory:
- description:
- - Memory in GB.
- type: int
- default: 1
- name:
- description:
- - A 1 to 6 character identifier to use for the server. This is required when state is 'present'.
- type: str
- network_id:
- description:
- - The network UUID on which to create servers.
- type: str
- packages:
- description:
- - The list of blueprint packages to run on the server after it is created.
- type: list
- elements: dict
- default: []
- password:
- description:
- - Password for the administrator / root user
- type: str
- primary_dns:
- description:
- - Primary DNS used by the server.
- type: str
- public_ip_protocol:
- description:
- - The protocol to use for the public ip if add_public_ip is set to True.
- type: str
- default: 'TCP'
- choices: ['TCP', 'UDP', 'ICMP']
- public_ip_ports:
- description:
- - A list of ports to allow on the firewall to the server's public ip, if add_public_ip is set to True.
- type: list
- elements: dict
- default: []
- secondary_dns:
- description:
- - Secondary DNS used by the server.
- type: str
- server_ids:
- description:
- - Required for started, stopped, and absent states.
- A list of server Ids to ensure are started, stopped, or absent.
- type: list
- default: []
- elements: str
- source_server_password:
- description:
- - The password for the source server if a clone is specified.
- type: str
- state:
- description:
- - The state to ensure that the provided resources are in.
- type: str
- default: 'present'
- choices: ['present', 'absent', 'started', 'stopped']
- storage_type:
- description:
- - The type of storage to attach to the server.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale']
- template:
- description:
- - The template to use for server creation. Will search for a template if a partial string is provided.
- This is required when state is 'present'.
- type: str
- ttl:
- description:
- - The time to live for the server in seconds. The server will be deleted when this time expires.
- type: str
- type:
- description:
- - The type of server to create.
- type: str
- default: 'standard'
- choices: ['standard', 'hyperscale', 'bareMetal']
- configuration_id:
- description:
- - Only required for bare metal servers.
- Specifies the identifier for the specific configuration type of bare metal server to deploy.
- type: str
- os_type:
- description:
- - Only required for bare metal servers.
- Specifies the OS to provision with the bare metal server.
- type: str
- choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- type: bool
- default: 'yes'
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enable access to the
- CenturyLink Cloud
- - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Provision a single Ubuntu Server
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- count: 1
- group: Default Group
- state: present
-
-- name: Ensure 'Default Group' has exactly 5 servers
- community.general.clc_server:
- name: test
- template: ubuntu-14-64
- exact_count: 5
- count_group: Default Group
- group: Default Group
-
-- name: Stop a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: stopped
-
-- name: Start a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: started
-
-- name: Delete a Server
- community.general.clc_server:
- server_ids:
- - UC1ACCT-TEST01
- state: absent
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are created
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-partially_created_server_ids:
- description: The list of server ids that are partially created
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-servers:
- description: The list of server objects returned from CLC
- returned: success
- type: list
- sample:
- [
- {
- "changeInfo":{
- "createdBy":"service.wfad",
- "createdDate":1438196820,
- "modifiedBy":"service.wfad",
- "modifiedDate":1438196820
- },
- "description":"test-server",
- "details":{
- "alertPolicies":[
-
- ],
- "cpu":1,
- "customFields":[
-
- ],
- "diskCount":3,
- "disks":[
- {
- "id":"0:0",
- "partitionPaths":[
-
- ],
- "sizeGB":1
- },
- {
- "id":"0:1",
- "partitionPaths":[
-
- ],
- "sizeGB":2
- },
- {
- "id":"0:2",
- "partitionPaths":[
-
- ],
- "sizeGB":14
- }
- ],
- "hostName":"",
- "inMaintenanceMode":false,
- "ipAddresses":[
- {
- "internal":"10.1.1.1"
- }
- ],
- "memoryGB":1,
- "memoryMB":1024,
- "partitions":[
-
- ],
- "powerState":"started",
- "snapshots":[
-
- ],
- "storageGB":17
- },
- "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"test-server",
- "ipaddress":"10.120.45.23",
- "isTemplate":false,
- "links":[
- {
- "href":"/v2/servers/wfad/test-server",
- "id":"test-server",
- "rel":"self",
- "verbs":[
- "GET",
- "PATCH",
- "DELETE"
- ]
- },
- {
- "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
- "id":"086ac1dfe0b6411989e8d1b77c4065f0",
- "rel":"group"
- },
- {
- "href":"/v2/accounts/wfad",
- "id":"wfad",
- "rel":"account"
- },
- {
- "href":"/v2/billing/wfad/serverPricing/test-server",
- "rel":"billing"
- },
- {
- "href":"/v2/servers/wfad/test-server/publicIPAddresses",
- "rel":"publicIPAddresses",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/credentials",
- "rel":"credentials"
- },
- {
- "href":"/v2/servers/wfad/test-server/statistics",
- "rel":"statistics"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
- "rel":"upcomingScheduledActivities"
- },
- {
- "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
- "rel":"scheduledActivities",
- "verbs":[
- "GET",
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/capabilities",
- "rel":"capabilities"
- },
- {
- "href":"/v2/servers/wfad/test-server/alertPolicies",
- "rel":"alertPolicyMappings",
- "verbs":[
- "POST"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
- "rel":"antiAffinityPolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- },
- {
- "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
- "rel":"cpuAutoscalePolicyMapping",
- "verbs":[
- "PUT",
- "DELETE"
- ]
- }
- ],
- "locationId":"UC1",
- "name":"test-server",
- "os":"ubuntu14_64Bit",
- "osType":"Ubuntu 14 64-bit",
- "status":"active",
- "storageType":"standard",
- "type":"standard"
- }
- ]
-'''
-
-__version__ = '${version}'
-
-import json
-import os
-import time
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
- from clc import APIFailedResponse
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcServer:
- clc = clc_sdk
-
- def __init__(self, module):
- """
- Construct module
- """
- self.clc = clc_sdk
- self.module = module
- self.group_dict = {}
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- changed = False
- new_server_ids = []
- server_dict_array = []
-
- self._set_clc_credentials_from_env()
- self.module.params = self._validate_module_params(
- self.clc,
- self.module)
- p = self.module.params
- state = p.get('state')
-
- #
- # Handle each state
- #
- partial_servers_ids = []
- if state == 'absent':
- server_ids = p['server_ids']
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of instances to delete: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._delete_servers(module=self.module,
- clc=self.clc,
- server_ids=server_ids)
-
- elif state in ('started', 'stopped'):
- server_ids = p.get('server_ids')
- if not isinstance(server_ids, list):
- return self.module.fail_json(
- msg='server_ids needs to be a list of servers to run: %s' %
- server_ids)
-
- (changed,
- server_dict_array,
- new_server_ids) = self._start_stop_servers(self.module,
- self.clc,
- server_ids)
-
- elif state == 'present':
- # Changed is always set to true when provisioning new instances
- if not p.get('template') and p.get('type') != 'bareMetal':
- return self.module.fail_json(
- msg='template parameter is required for new instance')
-
- if p.get('exact_count') is None:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._create_servers(self.module,
- self.clc)
- else:
- (server_dict_array,
- new_server_ids,
- partial_servers_ids,
- changed) = self._enforce_count(self.module,
- self.clc)
-
- self.module.exit_json(
- changed=changed,
- server_ids=new_server_ids,
- partially_created_server_ids=partial_servers_ids,
- servers=server_dict_array)
-
- @staticmethod
- def _define_module_argument_spec():
- """
- Define the argument spec for the ansible module
- :return: argument spec dictionary
- """
- argument_spec = dict(
- name=dict(),
- template=dict(),
- group=dict(default='Default Group'),
- network_id=dict(),
- location=dict(default=None),
- cpu=dict(default=1, type='int'),
- memory=dict(default=1, type='int'),
- alias=dict(default=None),
- password=dict(default=None, no_log=True),
- ip_address=dict(default=None),
- storage_type=dict(
- default='standard',
- choices=[
- 'standard',
- 'hyperscale']),
- type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
- primary_dns=dict(default=None),
- secondary_dns=dict(default=None),
- additional_disks=dict(type='list', default=[], elements='dict'),
- custom_fields=dict(type='list', default=[], elements='dict'),
- ttl=dict(default=None),
- managed_os=dict(type='bool', default=False),
- description=dict(default=None),
- source_server_password=dict(default=None, no_log=True),
- cpu_autoscale_policy_id=dict(default=None),
- anti_affinity_policy_id=dict(default=None),
- anti_affinity_policy_name=dict(default=None),
- alert_policy_id=dict(default=None),
- alert_policy_name=dict(default=None),
- packages=dict(type='list', default=[], elements='dict'),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'started',
- 'stopped']),
- count=dict(type='int', default=1),
- exact_count=dict(type='int', default=None),
- count_group=dict(),
- server_ids=dict(type='list', default=[], elements='str'),
- add_public_ip=dict(type='bool', default=False),
- public_ip_protocol=dict(
- default='TCP',
- choices=[
- 'TCP',
- 'UDP',
- 'ICMP']),
- public_ip_ports=dict(type='list', default=[], elements='dict'),
- configuration_id=dict(default=None),
- os_type=dict(default=None,
- choices=[
- 'redHat6_64Bit',
- 'centOS6_64Bit',
- 'windows2012R2Standard_64Bit',
- 'ubuntu14_64Bit'
- ]),
- wait=dict(type='bool', default=True))
-
- mutually_exclusive = [
- ['exact_count', 'count'],
- ['exact_count', 'state'],
- ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
- ['alert_policy_id', 'alert_policy_name'],
- ]
- return {"argument_spec": argument_spec,
- "mutually_exclusive": mutually_exclusive}
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _validate_module_params(clc, module):
- """
- Validate the module params, and lookup default values.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: dictionary of validated params
- """
- params = module.params
- datacenter = ClcServer._find_datacenter(clc, module)
-
- ClcServer._validate_types(module)
- ClcServer._validate_name(module)
-
- params['alias'] = ClcServer._find_alias(clc, module)
- params['cpu'] = ClcServer._find_cpu(clc, module)
- params['memory'] = ClcServer._find_memory(clc, module)
- params['description'] = ClcServer._find_description(module)
- params['ttl'] = ClcServer._find_ttl(clc, module)
- params['template'] = ClcServer._find_template_id(module, datacenter)
- params['group'] = ClcServer._find_group(module, datacenter).id
- params['network_id'] = ClcServer._find_network_id(module, datacenter)
- params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
- clc,
- module)
- params['alert_policy_id'] = ClcServer._find_alert_policy_id(
- clc,
- module)
-
- return params
-
- @staticmethod
- def _find_datacenter(clc, module):
- """
- Find the datacenter by calling the CLC API.
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Datacenter instance
- """
- location = module.params.get('location')
- try:
- if not location:
- account = clc.v2.Account()
- location = account.data.get('primaryDataCenter')
- data_center = clc.v2.Datacenter(location)
- return data_center
- except CLCException:
- module.fail_json(msg="Unable to find location: {0}".format(location))
-
- @staticmethod
- def _find_alias(clc, module):
- """
- Find or Validate the Account Alias by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: clc-sdk.Account instance
- """
- alias = module.params.get('alias')
- if not alias:
- try:
- alias = clc.v2.Account.GetAlias()
- except CLCException as ex:
- module.fail_json(msg='Unable to find account alias. {0}'.format(
- ex.message
- ))
- return alias
-
- @staticmethod
- def _find_cpu(clc, module):
- """
- Find or validate the CPU value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for CPU
- """
- cpu = module.params.get('cpu')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not cpu and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("cpu"):
- cpu = group.Defaults("cpu")
- else:
- module.fail_json(
- msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
- return cpu
-
- @staticmethod
- def _find_memory(clc, module):
- """
- Find or validate the Memory value by calling the CLC API
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: Int value for Memory
- """
- memory = module.params.get('memory')
- group_id = module.params.get('group_id')
- alias = module.params.get('alias')
- state = module.params.get('state')
-
- if not memory and state == 'present':
- group = clc.v2.Group(id=group_id,
- alias=alias)
- if group.Defaults("memory"):
- memory = group.Defaults("memory")
- else:
- module.fail_json(msg=str(
- "Can\'t determine a default memory value. Please provide a value for memory."))
- return memory
-
- @staticmethod
- def _find_description(module):
- """
- Set the description module param to name if description is blank
- :param module: the module to validate
- :return: string description
- """
- description = module.params.get('description')
- if not description:
- description = module.params.get('name')
- return description
-
- @staticmethod
- def _validate_types(module):
- """
- Validate that type and storage_type are set appropriately, and fail if not
- :param module: the module to validate
- :return: none
- """
- state = module.params.get('state')
- server_type = module.params.get(
- 'type').lower() if module.params.get('type') else None
- storage_type = module.params.get(
- 'storage_type').lower() if module.params.get('storage_type') else None
-
- if state == "present":
- if server_type == "standard" and storage_type not in (
- "standard", "premium"):
- module.fail_json(
- msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
-
- if server_type == "hyperscale" and storage_type != "hyperscale":
- module.fail_json(
- msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
-
- @staticmethod
- def _validate_name(module):
- """
- Validate that name is the correct length if provided, fail if it's not
- :param module: the module to validate
- :return: none
- """
- server_name = module.params.get('name')
- state = module.params.get('state')
-
- if state == 'present' and (
- len(server_name) < 1 or len(server_name) > 6):
- module.fail_json(msg=str(
- "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
-
- @staticmethod
- def _find_ttl(clc, module):
- """
- Validate that TTL is > 3600 if set, and fail if not
- :param clc: clc-sdk instance to use
- :param module: module to validate
- :return: validated ttl
- """
- ttl = module.params.get('ttl')
-
- if ttl:
- if ttl <= 3600:
- return module.fail_json(msg=str("Ttl cannot be <= 3600"))
- else:
- ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
- return ttl
-
- @staticmethod
- def _find_template_id(module, datacenter):
- """
- Find the template id by calling the CLC API.
- :param module: the module to validate
- :param datacenter: the datacenter to search for the template
- :return: a valid clc template id
- """
- lookup_template = module.params.get('template')
- state = module.params.get('state')
- type = module.params.get('type')
- result = None
-
- if state == 'present' and type != 'bareMetal':
- try:
- result = datacenter.Templates().Search(lookup_template)[0].id
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a template: " +
- lookup_template +
- " in location: " +
- datacenter.id))
- return result
-
- @staticmethod
- def _find_network_id(module, datacenter):
- """
- Validate the provided network id or return a default.
- :param module: the module to validate
- :param datacenter: the datacenter to search for a network id
- :return: a valid network id
- """
- network_id = module.params.get('network_id')
-
- if not network_id:
- try:
- network_id = datacenter.Networks().networks[0].id
- # -- added for clc-sdk 2.23 compatibility
- # datacenter_networks = clc_sdk.v2.Networks(
- # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
- # network_id = datacenter_networks.networks[0].id
- # -- end
- except CLCException:
- module.fail_json(
- msg=str(
- "Unable to find a network in location: " +
- datacenter.id))
-
- return network_id
-
- @staticmethod
- def _find_aa_policy_id(clc, module):
- """
- Validate that the anti affinity policy exists for the given name and throw an error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: aa_policy_id: the anti affinity policy id of the given name.
- """
- aa_policy_id = module.params.get('anti_affinity_policy_id')
- aa_policy_name = module.params.get('anti_affinity_policy_name')
- if not aa_policy_id and aa_policy_name:
- alias = module.params.get('alias')
- aa_policy_id = ClcServer._get_anti_affinity_policy_id(
- clc,
- module,
- alias,
- aa_policy_name)
- if not aa_policy_id:
- module.fail_json(
- msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- @staticmethod
- def _find_alert_policy_id(clc, module):
- """
- Validate that the alert policy exists for the given name and throw an error if not
- :param clc: the clc-sdk instance
- :param module: the module to validate
- :return: alert_policy_id: the alert policy id of the given name.
- """
- alert_policy_id = module.params.get('alert_policy_id')
- alert_policy_name = module.params.get('alert_policy_name')
- if not alert_policy_id and alert_policy_name:
- alias = module.params.get('alias')
- alert_policy_id = ClcServer._get_alert_policy_id_by_name(
- clc=clc,
- module=module,
- alias=alias,
- alert_policy_name=alert_policy_name
- )
- if not alert_policy_id:
- module.fail_json(
- msg='No alert policy exists with name : %s' % alert_policy_name)
- return alert_policy_id
-
- def _create_servers(self, module, clc, override_count=None):
- """
- Create New Servers in CLC cloud
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created
- """
- p = module.params
- request_list = []
- servers = []
- server_dict_array = []
- created_server_ids = []
- partial_created_servers_ids = []
-
- add_public_ip = p.get('add_public_ip')
- public_ip_protocol = p.get('public_ip_protocol')
- public_ip_ports = p.get('public_ip_ports')
-
- params = {
- 'name': p.get('name'),
- 'template': p.get('template'),
- 'group_id': p.get('group'),
- 'network_id': p.get('network_id'),
- 'cpu': p.get('cpu'),
- 'memory': p.get('memory'),
- 'alias': p.get('alias'),
- 'password': p.get('password'),
- 'ip_address': p.get('ip_address'),
- 'storage_type': p.get('storage_type'),
- 'type': p.get('type'),
- 'primary_dns': p.get('primary_dns'),
- 'secondary_dns': p.get('secondary_dns'),
- 'additional_disks': p.get('additional_disks'),
- 'custom_fields': p.get('custom_fields'),
- 'ttl': p.get('ttl'),
- 'managed_os': p.get('managed_os'),
- 'description': p.get('description'),
- 'source_server_password': p.get('source_server_password'),
- 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
- 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
- 'packages': p.get('packages'),
- 'configuration_id': p.get('configuration_id'),
- 'os_type': p.get('os_type')
- }
-
- count = override_count if override_count else p.get('count')
-
- changed = count > 0
-
- if not changed:
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
- for i in range(0, count):
- if not module.check_mode:
- req = self._create_clc_server(clc=clc,
- module=module,
- server_params=params)
- server = req.requests[0].Server()
- request_list.append(req)
- servers.append(server)
-
- self._wait_for_requests(module, request_list)
- self._refresh_servers(module, servers)
-
- ip_failed_servers = self._add_public_ip_to_servers(
- module=module,
- should_add_public_ip=add_public_ip,
- servers=servers,
- public_ip_protocol=public_ip_protocol,
- public_ip_ports=public_ip_ports)
- ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
- module=module,
- servers=servers)
-
- for server in servers:
- if server in ip_failed_servers or server in ap_failed_servers:
- partial_created_servers_ids.append(server.id)
- else:
- # reload server details
- server = clc.v2.Server(server.id)
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
-
- if add_public_ip and len(server.PublicIPs().public_ips) > 0:
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- created_server_ids.append(server.id)
- server_dict_array.append(server.data)
-
- return server_dict_array, created_server_ids, partial_created_servers_ids, changed
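-
- # Behaviour note (illustrative): servers that provisioned but then failed the
- # public-ip or alert-policy step above are reported via
- # partially_created_server_ids rather than server_ids.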
-
- def _enforce_count(self, module, clc):
- """
- Enforce that the right number of servers is in the provided group.
- Creates or deletes servers as necessary.
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :return: a list of dictionaries with server information about the servers that were created or deleted
- """
- p = module.params
- changed = False
- count_group = p.get('count_group')
- datacenter = ClcServer._find_datacenter(clc, module)
- exact_count = p.get('exact_count')
- server_dict_array = []
- partial_servers_ids = []
- changed_server_ids = []
-
- # fail here if the exact count was specified without filtering
- # on a group, as this may lead to an undesired removal of instances
- if exact_count and count_group is None:
- return module.fail_json(
- msg="you must use the 'count_group' option with exact_count")
-
- servers, running_servers = ClcServer._find_running_servers_by_group(
- module, datacenter, count_group)
-
- if len(running_servers) == exact_count:
- changed = False
-
- elif len(running_servers) < exact_count:
- to_create = exact_count - len(running_servers)
- server_dict_array, changed_server_ids, partial_servers_ids, changed \
- = self._create_servers(module, clc, override_count=to_create)
-
- for server in server_dict_array:
- running_servers.append(server)
-
- elif len(running_servers) > exact_count:
- to_remove = len(running_servers) - exact_count
- all_server_ids = sorted([x.id for x in running_servers])
- remove_ids = all_server_ids[0:to_remove]
-
- (changed, server_dict_array, changed_server_ids) \
- = ClcServer._delete_servers(module, clc, remove_ids)
-
- return server_dict_array, changed_server_ids, partial_servers_ids, changed
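-
- # Worked example (illustrative): with exact_count=5 and 3 servers running in
- # count_group, _create_servers() is called with override_count=2; with 7
- # running, the server ids are sorted and the first 2 are deleted.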
-
- @staticmethod
- def _wait_for_requests(module, request_list):
- """
- Block until server provisioning requests are completed.
- :param module: the AnsibleModule object
- :param request_list: a list of clc-sdk.Request instances
- :return: none
- """
- wait = module.params.get('wait')
- if wait:
- # Requests.WaitUntilComplete() returns the count of failed requests
- failed_requests_count = sum(
- [request.WaitUntilComplete() for request in request_list])
-
- if failed_requests_count > 0:
- module.fail_json(
- msg='Unable to process server request')
-
- @staticmethod
- def _refresh_servers(module, servers):
- """
- Loop through a list of servers and refresh them.
- :param module: the AnsibleModule object
- :param servers: list of clc-sdk.Server instances to refresh
- :return: none
- """
- for server in servers:
- try:
- server.Refresh()
- except CLCException as ex:
- module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
- server.id, ex.message
- ))
-
- @staticmethod
- def _add_public_ip_to_servers(
- module,
- should_add_public_ip,
- servers,
- public_ip_protocol,
- public_ip_ports):
- """
- Create a public IP for servers
- :param module: the AnsibleModule object
- :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
- :param servers: List of servers to add public ips to
- :param public_ip_protocol: a protocol to allow for the public ips
- :param public_ip_ports: list of ports to allow for the public ips
- :return: none
- """
- failed_servers = []
- if not should_add_public_ip:
- return failed_servers
-
- ports_lst = []
- request_list = []
- server = None
-
- for port in public_ip_ports:
- ports_lst.append(
- {'protocol': public_ip_protocol, 'port': port})
- try:
- if not module.check_mode:
- for server in servers:
- request = server.PublicIPs().Add(ports_lst)
- request_list.append(request)
- except APIFailedResponse:
- failed_servers.append(server)
- ClcServer._wait_for_requests(module, request_list)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_servers(clc, module, servers):
- """
- Associate the alert policy to servers
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param servers: List of servers to add alert policy to
- :return: failed_servers: the list of servers which failed while associating alert policy
- """
- failed_servers = []
- p = module.params
- alert_policy_id = p.get('alert_policy_id')
- alias = p.get('alias')
-
- if alert_policy_id and not module.check_mode:
- for server in servers:
- try:
- ClcServer._add_alert_policy_to_server(
- clc=clc,
- alias=alias,
- server_id=server.id,
- alert_policy_id=alert_policy_id)
- except CLCException:
- failed_servers.append(server)
- return failed_servers
-
- @staticmethod
- def _add_alert_policy_to_server(
- clc, alias, server_id, alert_policy_id):
- """
- Associate an alert policy to a clc server
- :param clc: the clc-sdk instance to use
- :param alias: the clc account alias
- :param server_id: The clc server id
- :param alert_policy_id: the alert policy id to be associated to the server
- :return: none
- """
- try:
- clc.v2.API.Call(
- method='POST',
- url='servers/%s/%s/alertPolicies' % (alias, server_id),
- payload=json.dumps(
- {
- 'id': alert_policy_id
- }))
- except APIFailedResponse as e:
- raise CLCException(
- 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
- server_id, str(e.response_text)))
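-
- # Request sketch (hypothetical values): for alias='MYALIAS' and
- # server_id='UC1TEST-SVR01', the call above issues
- #   POST servers/MYALIAS/UC1TEST-SVR01/alertPolicies
- # with payload '{"id": "<alert policy id>"}'.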
-
- @staticmethod
- def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
- """
- Returns the alert policy id for the given alert policy name
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the clc account alias
- :param alert_policy_name: the name of the alert policy
- :return: alert_policy_id: the alert policy id
- """
- alert_policy_id = None
- policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
- if not policies:
- return alert_policy_id
- for policy in policies.get('items'):
- if policy.get('name') == alert_policy_name:
- if not alert_policy_id:
- alert_policy_id = policy.get('id')
- else:
- return module.fail_json(
- msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
- return alert_policy_id
-
- @staticmethod
- def _delete_servers(module, clc, server_ids):
- """
- Delete the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to delete
- :return: a list of dictionaries with server information about the servers that were deleted
- """
- terminated_server_ids = []
- server_dict_array = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if not module.check_mode:
- request_list.append(server.Delete())
- ClcServer._wait_for_requests(module, request_list)
-
- for server in servers:
- terminated_server_ids.append(server.id)
-
- return True, server_dict_array, terminated_server_ids
-
- @staticmethod
- def _start_stop_servers(module, clc, server_ids):
- """
- Start or Stop the servers on the provided list
- :param module: the AnsibleModule object
- :param clc: the clc-sdk instance to use
- :param server_ids: list of servers to start or stop
- :return: a list of dictionaries with server information about the servers that were started or stopped
- """
- p = module.params
- state = p.get('state')
- changed = False
- changed_servers = []
- server_dict_array = []
- result_server_ids = []
- request_list = []
-
- if not isinstance(server_ids, list) or len(server_ids) < 1:
- return module.fail_json(
- msg='server_ids should be a list of servers, aborting')
-
- servers = clc.v2.Servers(server_ids).Servers()
- for server in servers:
- if server.powerState != state:
- changed_servers.append(server)
- if not module.check_mode:
- request_list.append(
- ClcServer._change_server_power_state(
- module,
- server,
- state))
- changed = True
-
- ClcServer._wait_for_requests(module, request_list)
- ClcServer._refresh_servers(module, changed_servers)
-
- for server in set(changed_servers + servers):
- try:
- server.data['ipaddress'] = server.details[
- 'ipAddresses'][0]['internal']
- server.data['publicip'] = str(
- server.PublicIPs().public_ips[0])
- except (KeyError, IndexError):
- pass
-
- server_dict_array.append(server.data)
- result_server_ids.append(server.id)
-
- return changed, server_dict_array, result_server_ids
-
- @staticmethod
- def _change_server_power_state(module, server, state):
- """
- Change the server powerState
- :param module: the module to check for intended state
- :param server: the server to start or stop
- :param state: the intended powerState for the server
- :return: the request object from clc-sdk call
- """
- result = None
- try:
- if state == 'started':
- result = server.PowerOn()
- else:
- # Try to shut down the server and fall back to power off when unable to shut down.
- result = server.ShutDown()
- if result and hasattr(result, 'requests') and result.requests[0]:
- return result
- else:
- result = server.PowerOff()
- except CLCException:
- module.fail_json(
- msg='Unable to change power state for server {0}'.format(
- server.id))
- return result
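-
- # Behaviour note (illustrative): a 'stopped' request first attempts a graceful
- # ShutDown(); only when that returns no usable request object does the code
- # fall back to a hard PowerOff().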
-
- @staticmethod
- def _find_running_servers_by_group(module, datacenter, count_group):
- """
- Find a list of running servers in the provided group
- :param module: the AnsibleModule object
- :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
- :param count_group: the group to count the servers
- :return: list of servers, and list of running servers
- """
- group = ClcServer._find_group(
- module=module,
- datacenter=datacenter,
- lookup_group=count_group)
-
- servers = group.Servers().Servers()
- running_servers = []
-
- for server in servers:
- if server.status == 'active' and server.powerState == 'started':
- running_servers.append(server)
-
- return servers, running_servers
-
- @staticmethod
- def _find_group(module, datacenter, lookup_group=None):
- """
- Find a server group in a datacenter by calling the CLC API
- :param module: the AnsibleModule instance
- :param datacenter: clc-sdk.Datacenter instance to search for the group
- :param lookup_group: string name of the group to search for
- :return: clc-sdk.Group instance
- """
- if not lookup_group:
- lookup_group = module.params.get('group')
- try:
- return datacenter.Groups().Get(lookup_group)
- except CLCException:
- pass
-
- # The search above only acts on the main (top-level) groups; recurse into subgroups below.
- result = ClcServer._find_group_recursive(
- module,
- datacenter.Groups(),
- lookup_group)
-
- if result is None:
- module.fail_json(
- msg=str(
- "Unable to find group: " +
- lookup_group +
- " in location: " +
- datacenter.id))
-
- return result
-
- @staticmethod
- def _find_group_recursive(module, group_list, lookup_group):
- """
- Find a server group by recursively walking the tree
- :param module: the AnsibleModule instance to use
- :param group_list: a list of groups to search
- :param lookup_group: the group to look for
- :return: list of groups
- """
- result = None
- for group in group_list.groups:
- subgroups = group.Subgroups()
- try:
- return subgroups.Get(lookup_group)
- except CLCException:
- result = ClcServer._find_group_recursive(
- module,
- subgroups,
- lookup_group)
-
- if result is not None:
- break
-
- return result
-
- @staticmethod
- def _create_clc_server(
- clc,
- module,
- server_params):
- """
- Call the CLC Rest API to Create a Server
- :param clc: the clc-python-sdk instance to use
- :param module: the AnsibleModule instance to use
- :param server_params: a dictionary of params to use to create the servers
- :return: clc-sdk.Request object linked to the queued server request
- """
-
- try:
- res = clc.v2.API.Call(
- method='POST',
- url='servers/%s' %
- (server_params.get('alias')),
- payload=json.dumps(
- {
- 'name': server_params.get('name'),
- 'description': server_params.get('description'),
- 'groupId': server_params.get('group_id'),
- 'sourceServerId': server_params.get('template'),
- 'isManagedOS': server_params.get('managed_os'),
- 'primaryDNS': server_params.get('primary_dns'),
- 'secondaryDNS': server_params.get('secondary_dns'),
- 'networkId': server_params.get('network_id'),
- 'ipAddress': server_params.get('ip_address'),
- 'password': server_params.get('password'),
- 'sourceServerPassword': server_params.get('source_server_password'),
- 'cpu': server_params.get('cpu'),
- 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
- 'memoryGB': server_params.get('memory'),
- 'type': server_params.get('type'),
- 'storageType': server_params.get('storage_type'),
- 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
- 'customFields': server_params.get('custom_fields'),
- 'additionalDisks': server_params.get('additional_disks'),
- 'ttl': server_params.get('ttl'),
- 'packages': server_params.get('packages'),
- 'configurationId': server_params.get('configuration_id'),
- 'osType': server_params.get('os_type')}))
-
- result = clc.v2.Requests(res)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
- server_params.get('name'),
- ex.response_text
- ))
-
- #
- # Patch the Request object so that it returns a valid server
-
- # Find the server's UUID from the API response
- server_uuid = [obj['id']
- for obj in res['links'] if obj['rel'] == 'self'][0]
-
- # Change the request server method to a _find_server_by_uuid closure so
- # that it will work
- result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
- clc,
- module,
- server_uuid,
- server_params.get('alias'))
-
- return result
-
- @staticmethod
- def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
- """
- Retrieves the anti affinity policy id for the given policy name
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param alias: the CLC account alias
- :param aa_policy_name: the anti affinity policy name
- :return: aa_policy_id: The anti affinity policy id
- """
- aa_policy_id = None
- try:
- aa_policies = clc.v2.API.Call(method='GET',
- url='antiAffinityPolicies/%s' % alias)
- except APIFailedResponse as ex:
- return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
- alias, ex.response_text))
- for aa_policy in aa_policies.get('items'):
- if aa_policy.get('name') == aa_policy_name:
- if not aa_policy_id:
- aa_policy_id = aa_policy.get('id')
- else:
- return module.fail_json(
- msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
- return aa_policy_id
-
- #
- # This is the function that gets patched onto the Request.Server method using a lambda closure
- #
-
- @staticmethod
- def _find_server_by_uuid_w_retry(
- clc, module, svr_uuid, alias=None, retries=5, back_out=2):
- """
- Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
- :param clc: the clc-sdk instance to use
- :param module: the AnsibleModule object
- :param svr_uuid: UUID of the server
- :param retries: the number of retry attempts to make prior to fail. default is 5
- :param alias: the Account Alias to search
- :return: a clc-sdk.Server instance
- """
- if not alias:
- alias = clc.v2.Account.GetAlias()
-
- # Wait and retry if the api returns a 404
- while True:
- retries -= 1
- try:
- server_obj = clc.v2.API.Call(
- method='GET', url='servers/%s/%s?uuid=true' %
- (alias, svr_uuid))
- server_id = server_obj['id']
- server = clc.v2.Server(
- id=server_id,
- alias=alias,
- server_obj=server_obj)
- return server
-
- except APIFailedResponse as e:
- if e.response_status_code != 404:
- return module.fail_json(
- msg='A failure response was received from CLC API when '
- 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
- (svr_uuid, e.response_status_code, e.message))
- if retries == 0:
- return module.fail_json(
- msg='Unable to reach the CLC API after 5 attempts')
- time.sleep(back_out)
- back_out *= 2
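-
- # Timing sketch (assuming the defaults retries=5, back_out=2): consecutive 404
- # responses are retried after sleeping 2, 4, 8 and 16 seconds, after which the
- # module fails with the 'after 5 attempts' message above.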
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- The main function. Instantiates the module and calls process_request.
- :return: none
- """
- argument_dict = ClcServer._define_module_argument_spec()
- module = AnsibleModule(supports_check_mode=True, **argument_dict)
- clc_server = ClcServer(module)
- clc_server.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
deleted file mode 100644
index 4de4c993..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/centurylink/clc_server_snapshot.py
+++ /dev/null
@@ -1,411 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2015 CenturyLink
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: clc_server_snapshot
-short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
-description:
- - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
-options:
- server_ids:
- description:
- - The list of CLC server Ids.
- type: list
- required: True
- elements: str
- expiration_days:
- description:
- - The number of days to keep the server snapshot before it expires.
- type: int
- default: 7
- required: False
- state:
- description:
- - The state to ensure that the provided resources are in.
- type: str
- default: 'present'
- required: False
- choices: ['present', 'absent', 'restore']
- wait:
- description:
- - Whether to wait for the provisioning tasks to finish before returning.
- default: 'True'
- required: False
- type: str
-requirements:
- - python = 2.7
- - requests >= 2.5.0
- - clc-sdk
-author: "CLC Runner (@clc-runner)"
-notes:
- - To use this module, it is required to set the below environment variables which enable access to the
- CenturyLink Cloud
- - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
- - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
- - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
- CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
- - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
- - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
-'''
-
-EXAMPLES = '''
-# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
-
-- name: Create server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- expiration_days: 10
- wait: True
- state: present
-
-- name: Restore server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: True
- state: restore
-
-- name: Delete server snapshot
- community.general.clc_server_snapshot:
- server_ids:
- - UC1TEST-SVR01
- - UC1TEST-SVR02
- wait: True
- state: absent
-'''
-
-RETURN = '''
-server_ids:
- description: The list of server ids that are changed
- returned: success
- type: list
- sample:
- [
- "UC1TEST-SVR01",
- "UC1TEST-SVR02"
- ]
-'''
-
-__version__ = '${version}'
-
-import os
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- REQUESTS_FOUND = False
-else:
- REQUESTS_FOUND = True
-
-#
-# Requires the clc-python-sdk.
-# sudo pip install clc-sdk
-#
-CLC_IMP_ERR = None
-try:
- import clc as clc_sdk
- from clc import CLCException
-except ImportError:
- CLC_IMP_ERR = traceback.format_exc()
- CLC_FOUND = False
- clc_sdk = None
-else:
- CLC_FOUND = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-
-class ClcSnapshot:
-
- clc = clc_sdk
- module = None
-
- def __init__(self, module):
- """
- Construct module
- """
- self.module = module
-
- if not CLC_FOUND:
- self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
- if not REQUESTS_FOUND:
- self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
- if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
- self.module.fail_json(
- msg='requests library version should be >= 2.5.0')
-
- self._set_user_agent(self.clc)
-
- def process_request(self):
- """
- Process the request - Main Code Path
- :return: Returns with either an exit_json or fail_json
- """
- p = self.module.params
- server_ids = p['server_ids']
- expiration_days = p['expiration_days']
- state = p['state']
- request_list = []
- changed = False
- changed_servers = []
-
- self._set_clc_credentials_from_env()
- if state == 'present':
- changed, request_list, changed_servers = self.ensure_server_snapshot_present(
- server_ids=server_ids,
- expiration_days=expiration_days)
- elif state == 'absent':
- changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
- server_ids=server_ids)
- elif state == 'restore':
- changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
- server_ids=server_ids)
-
- self._wait_for_requests_to_complete(request_list)
- return self.module.exit_json(
- changed=changed,
- server_ids=changed_servers)
-
- def ensure_server_snapshot_present(self, server_ids, expiration_days):
- """
- Ensures the given set of server_ids have the snapshots created
- :param server_ids: The list of server_ids to create the snapshot
- :param expiration_days: The number of days to keep the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of CLC request objects from the CLC API call
- changed_servers: The list of server IDs that were modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) == 0]
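- # Only servers without an existing snapshot are changed; servers that
- # already have one are left untouched, keeping the operation idempotent.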
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._create_server_snapshot(server, expiration_days)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _create_server_snapshot(self, server, expiration_days):
- """
- Create the snapshot for the CLC server
- :param server: the CLC server object
- :param expiration_days: The number of days to keep the snapshot
- :return: the create request object from CLC API Call
- """
- result = None
- try:
- result = server.CreateSnapshot(
- delete_existing=True,
- expiration_days=expiration_days)
- except CLCException as ex:
- self.module.fail_json(msg='Failed to create snapshot for server: {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_absent(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots removed
- :param server_ids: The list of server_ids to delete the snapshot
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of CLC request objects from the CLC API call
- changed_servers: The list of server IDs that were modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._delete_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _delete_server_snapshot(self, server):
- """
- Delete snapshot for the CLC server
- :param server: the CLC server object
- :return: the delete snapshot request object from CLC API
- """
- result = None
- try:
- result = server.DeleteSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to delete snapshot for server: {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def ensure_server_snapshot_restore(self, server_ids):
- """
- Ensures the given set of server_ids have the snapshots restored
- :param server_ids: The list of server_ids to restore the snapshot for
- :return: (changed, request_list, changed_servers)
- changed: A flag indicating whether any change was made
- request_list: the list of CLC request objects from the CLC API call
- changed_servers: The list of server IDs that were modified
- """
- request_list = []
- changed = False
- servers = self._get_servers_from_clc(
- server_ids,
- 'Failed to obtain server list from the CLC API')
- servers_to_change = [
- server for server in servers if len(
- server.GetSnapshots()) > 0]
- for server in servers_to_change:
- changed = True
- if not self.module.check_mode:
- request = self._restore_server_snapshot(server)
- request_list.append(request)
- changed_servers = [
- server.id for server in servers_to_change if server.id]
- return changed, request_list, changed_servers
-
- def _restore_server_snapshot(self, server):
- """
- Restore snapshot for the CLC server
- :param server: the CLC server object
- :return: the restore snapshot request object from CLC API
- """
- result = None
- try:
- result = server.RestoreSnapshot()
- except CLCException as ex:
- self.module.fail_json(msg='Failed to restore snapshot for server: {0}. {1}'.format(
- server.id, ex.response_text
- ))
- return result
-
- def _wait_for_requests_to_complete(self, requests_lst):
- """
- Waits until the CLC requests are complete if the wait argument is True
- :param requests_lst: The list of CLC request objects
- :return: none
- """
- if not self.module.params['wait']:
- return
- for request in requests_lst:
- request.WaitUntilComplete()
- for request_details in request.requests:
- if request_details.Status() != 'succeeded':
- self.module.fail_json(
- msg='Unable to process server snapshot request')
-
- @staticmethod
- def define_argument_spec():
- """
- This function defines the dictionary object required for the
- snapshot module
- :return: the argument spec dictionary object
- """
- argument_spec = dict(
- server_ids=dict(type='list', required=True, elements='str'),
- expiration_days=dict(default=7, type='int'),
- wait=dict(default=True, type='bool'),
- state=dict(
- default='present',
- choices=[
- 'present',
- 'absent',
- 'restore']),
- )
- return argument_spec
-
- def _get_servers_from_clc(self, server_list, message):
- """
- Internal function to fetch list of CLC server objects from a list of server ids
- :param server_list: The list of server ids
- :param message: The error message to throw in case of any error
- :return: the list of CLC server objects
- """
- try:
- return self.clc.v2.Servers(server_list).servers
- except CLCException as ex:
- return self.module.fail_json(msg=message + ': %s' % ex)
-
- def _set_clc_credentials_from_env(self):
- """
- Set the CLC Credentials on the sdk by reading environment variables
- :return: none
- """
- env = os.environ
- v2_api_token = env.get('CLC_V2_API_TOKEN', False)
- v2_api_username = env.get('CLC_V2_API_USERNAME', False)
- v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
- clc_alias = env.get('CLC_ACCT_ALIAS', False)
- api_url = env.get('CLC_V2_API_URL', False)
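-
- # Illustrative usage (assumed values, not part of the original module):
- # export CLC_V2_API_USERNAME=user CLC_V2_API_PASSWD=secret
- # or, token-based:
- # export CLC_V2_API_TOKEN=<token> CLC_ACCT_ALIAS=MYALIAS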
-
- if api_url:
- self.clc.defaults.ENDPOINT_URL_V2 = api_url
-
- if v2_api_token and clc_alias:
- self.clc._LOGIN_TOKEN_V2 = v2_api_token
- self.clc._V2_ENABLED = True
- self.clc.ALIAS = clc_alias
- elif v2_api_username and v2_api_passwd:
- self.clc.v2.SetCredentials(
- api_username=v2_api_username,
- api_passwd=v2_api_passwd)
- else:
- return self.module.fail_json(
- msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
- "environment variables")
-
- @staticmethod
- def _set_user_agent(clc):
- if hasattr(clc, 'SetRequestsSession'):
- agent_string = "ClcAnsibleModule/" + __version__
- ses = requests.Session()
- ses.headers.update({"Api-Client": agent_string})
- ses.headers['User-Agent'] += " " + agent_string
- clc.SetRequestsSession(ses)
-
-
-def main():
- """
- Main function
- :return: None
- """
- module = AnsibleModule(
- argument_spec=ClcSnapshot.define_argument_spec(),
- supports_check_mode=True
- )
- clc_snapshot = ClcSnapshot(module)
- clc_snapshot.process_request()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
deleted file mode 100644
index 64cc8b11..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_network.py
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Dimension Data
-# Authors:
-# - Aimon Bustardo
-# - Bert Diwa
-# - Adam Friedman
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: dimensiondata_network
-short_description: Create, update, and delete MCP 1.0 & 2.0 networks
-extends_documentation_fragment:
-- community.general.dimensiondata
-- community.general.dimensiondata_wait
-
-description:
- - Create, update, and delete MCP 1.0 & 2.0 networks
-author: 'Aimon Bustardo (@aimonb)'
-options:
- name:
- description:
- - The name of the network domain to create.
- required: true
- type: str
- description:
- description:
- - Additional description of the network domain.
- required: false
- type: str
- service_plan:
- description:
- - The service plan, either "ESSENTIALS" or "ADVANCED".
- - MCP 2.0 Only.
- choices: [ESSENTIALS, ADVANCED]
- default: ESSENTIALS
- type: str
- state:
- description:
- - Should the resource be present or absent.
- choices: [present, absent]
- default: present
- type: str
-'''
-
-EXAMPLES = '''
-- name: Create an MCP 1.0 network
- community.general.dimensiondata_network:
- region: na
- location: NA5
- name: mynet
-
-- name: Create an MCP 2.0 network
- community.general.dimensiondata_network:
- region: na
- mcp_user: my_user
- mcp_password: my_password
- location: NA9
- name: mynet
- service_plan: ADVANCED
-
-- name: Delete a network
- community.general.dimensiondata_network:
- region: na
- location: NA1
- name: mynet
- state: absent
-'''
-
-RETURN = '''
-network:
- description: Dictionary describing the network.
- returned: On success when I(state=present).
- type: complex
- contains:
- id:
- description: Network ID.
- type: str
- sample: "8c787000-a000-4050-a215-280893411a7d"
- name:
- description: Network name.
- type: str
- sample: "My network"
- description:
- description: Network description.
- type: str
- sample: "My network description"
- location:
- description: Datacenter location.
- type: str
- sample: NA3
- status:
- description: Network status. (MCP 2.0 only)
- type: str
- sample: NORMAL
- private_net:
- description: Private network subnet. (MCP 1.0 only)
- type: str
- sample: "10.2.3.0"
- multicast:
- description: Multicast enabled? (MCP 1.0 only)
- type: bool
- sample: false
-'''
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
-from ansible.module_utils.common.text.converters import to_native
-
-if HAS_LIBCLOUD:
- from libcloud.compute.base import NodeLocation
- from libcloud.common.dimensiondata import DimensionDataAPIException
-
-
-class DimensionDataNetworkModule(DimensionDataModule):
- """
- The dimensiondata_network module for Ansible.
- """
-
- def __init__(self):
- """
- Create a new Dimension Data network module.
- """
-
- super(DimensionDataNetworkModule, self).__init__(
- module=AnsibleModule(
- argument_spec=DimensionDataModule.argument_spec_with_wait(
- name=dict(type='str', required=True),
- description=dict(type='str', required=False),
- service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
- state=dict(default='present', choices=['present', 'absent'])
- ),
- required_together=DimensionDataModule.required_together()
- )
- )
-
- self.name = self.module.params['name']
- self.description = self.module.params['description']
- self.service_plan = self.module.params['service_plan']
- self.state = self.module.params['state']
-
- def state_present(self):
- network = self._get_network()
-
- if network:
- self.module.exit_json(
- changed=False,
- msg='Network already exists',
- network=self._network_to_dict(network)
- )
-
- network = self._create_network()
-
- self.module.exit_json(
- changed=True,
- msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
- network=self._network_to_dict(network)
- )
-
- def state_absent(self):
- network = self._get_network()
-
- if not network:
- self.module.exit_json(
- changed=False,
- msg='Network "%s" does not exist' % self.name,
- network=self._network_to_dict(network)
- )
-
- self._delete_network(network)
-
- def _get_network(self):
- if self.mcp_version == '1.0':
- networks = self.driver.list_networks(location=self.location)
- else:
- networks = self.driver.ex_list_network_domains(location=self.location)
-
- matched_network = [network for network in networks if network.name == self.name]
- if matched_network:
- return matched_network[0]
-
- return None
-
- def _network_to_dict(self, network):
- network_dict = dict(
- id=network.id,
- name=network.name,
- description=network.description
- )
-
- if isinstance(network.location, NodeLocation):
- network_dict['location'] = network.location.id
- else:
- network_dict['location'] = network.location
-
- if self.mcp_version == '1.0':
- network_dict['private_net'] = network.private_net
- network_dict['multicast'] = network.multicast
- network_dict['status'] = None
- else:
- network_dict['private_net'] = None
- network_dict['multicast'] = None
- network_dict['status'] = network.status
-
- return network_dict
-
- def _create_network(self):
-
- # Make sure service_plan argument is defined
- if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
- self.module.fail_json(
- msg='service_plan required when creating network and location is MCP 2.0'
- )
-
- # Create network
- try:
- if self.mcp_version == '1.0':
- network = self.driver.ex_create_network(
- self.location,
- self.name,
- description=self.description
- )
- else:
- network = self.driver.ex_create_network_domain(
- self.location,
- self.name,
- self.module.params['service_plan'],
- description=self.description
- )
- except DimensionDataAPIException as e:
-
- self.module.fail_json(
- msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
- )
-
- if self.module.params['wait'] is True:
- network = self._wait_for_network_state(network.id, 'NORMAL')
-
- return network
-
- def _delete_network(self, network):
- try:
- if self.mcp_version == '1.0':
- deleted = self.driver.ex_delete_network(network)
- else:
- deleted = self.driver.ex_delete_network_domain(network)
-
- if deleted:
- self.module.exit_json(
- changed=True,
- msg="Deleted network with id %s" % network.id
- )
-
- self.module.fail_json(
- "Unexpected failure deleting network with id %s" % network.id
- )
-
- except DimensionDataAPIException as e:
- self.module.fail_json(
- msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
- )
-
- def _wait_for_network_state(self, net_id, state_to_wait_for):
- try:
- return self.driver.connection.wait_for_state(
- state_to_wait_for,
- self.driver.ex_get_network_domain,
- self.module.params['wait_poll_interval'],
- self.module.params['wait_time'],
- net_id
- )
- except DimensionDataAPIException as e:
- self.module.fail_json(
- msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
- exception=traceback.format_exc()
- )
-
-
-def main():
- module = DimensionDataNetworkModule()
- if module.state == 'present':
- module.state_present()
- elif module.state == 'absent':
- module.state_absent()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py b/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
deleted file mode 100644
index 26c621f4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/dimensiondata/dimensiondata_vlan.py
+++ /dev/null
@@ -1,568 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2016 Dimension Data
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <https://www.gnu.org/licenses/>.
-#
-# Authors:
-# - Adam Friedman
-#
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: dimensiondata_vlan
-short_description: Manage a VLAN in a Cloud Control network domain.
-extends_documentation_fragment:
-- community.general.dimensiondata
-- community.general.dimensiondata_wait
-
-description:
- - Manage VLANs in Cloud Control network domains.
-author: 'Adam Friedman (@tintoy)'
-options:
- name:
- description:
- - The name of the target VLAN.
- type: str
- required: true
- description:
- description:
- - A description of the VLAN.
- type: str
- network_domain:
- description:
- - The ID or name of the target network domain.
- required: true
- type: str
- private_ipv4_base_address:
- description:
- - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
- type: str
- private_ipv4_prefix_size:
- description:
- - The size of the IPv4 address space, e.g. 24.
- - Required if C(private_ipv4_base_address) is specified.
- type: int
- state:
- description:
- - The desired state for the target VLAN.
- - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
- choices: [present, absent, readonly]
- default: present
- type: str
- allow_expand:
- description:
- - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
- - If C(False), the module will fail under these conditions.
- - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
- type: bool
- default: 'no'
-'''
-
-EXAMPLES = '''
-- name: Add or update VLAN
- community.general.dimensiondata_vlan:
- region: na
- location: NA5
- network_domain: test_network
- name: my_vlan1
- description: A test VLAN
- private_ipv4_base_address: 192.168.23.0
- private_ipv4_prefix_size: 24
- state: present
- wait: yes
-
-- name: Read / get VLAN details
- community.general.dimensiondata_vlan:
- region: na
- location: NA5
- network_domain: test_network
- name: my_vlan1
- state: readonly
- wait: yes
-
-- name: Delete a VLAN
- community.general.dimensiondata_vlan:
- region: na
- location: NA5
- network_domain: test_network
- name: my_vlan_1
- state: absent
- wait: yes
-'''
-
-RETURN = '''
-vlan:
- description: Dictionary describing the VLAN.
- returned: On success when I(state) is 'present'
- type: complex
- contains:
- id:
- description: VLAN ID.
- type: str
- sample: "aaaaa000-a000-4050-a215-2808934ccccc"
- name:
- description: VLAN name.
- type: str
- sample: "My VLAN"
- description:
- description: VLAN description.
- type: str
- sample: "My VLAN description"
- location:
- description: Datacenter location.
- type: str
- sample: NA3
- private_ipv4_base_address:
- description: The base address for the VLAN's private IPV4 network.
- type: str
- sample: 192.168.23.0
- private_ipv4_prefix_size:
- description: The prefix size for the VLAN's private IPV4 network.
- type: int
- sample: 24
- private_ipv4_gateway_address:
- description: The gateway address for the VLAN's private IPV4 network.
- type: str
- sample: 192.168.23.1
- private_ipv6_base_address:
- description: The base address for the VLAN's IPV6 network.
- type: str
- sample: 2402:9900:111:1195:0:0:0:0
- private_ipv6_prefix_size:
- description: The prefix size for the VLAN's IPV6 network.
- type: int
- sample: 64
- private_ipv6_gateway_address:
- description: The gateway address for the VLAN's IPV6 network.
- type: str
- sample: 2402:9900:111:1195:0:0:0:1
- status:
- description: VLAN status.
- type: str
- sample: NORMAL
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
-
-try:
- from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
-
- HAS_LIBCLOUD = True
-
-except ImportError:
- DimensionDataVlan = None
-
- HAS_LIBCLOUD = False
-
-
-class DimensionDataVlanModule(DimensionDataModule):
- """
- The dimensiondata_vlan module for Ansible.
- """
-
- def __init__(self):
- """
- Create a new Dimension Data VLAN module.
- """
-
- super(DimensionDataVlanModule, self).__init__(
- module=AnsibleModule(
- argument_spec=DimensionDataModule.argument_spec_with_wait(
- name=dict(required=True, type='str'),
- description=dict(default='', type='str'),
- network_domain=dict(required=True, type='str'),
- private_ipv4_base_address=dict(default='', type='str'),
- private_ipv4_prefix_size=dict(default=0, type='int'),
- allow_expand=dict(required=False, default=False, type='bool'),
- state=dict(default='present', choices=['present', 'absent', 'readonly'])
- ),
- required_together=DimensionDataModule.required_together()
- )
- )
-
- self.name = self.module.params['name']
- self.description = self.module.params['description']
- self.network_domain_selector = self.module.params['network_domain']
- self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
- self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
- self.state = self.module.params['state']
- self.allow_expand = self.module.params['allow_expand']
-
- if self.wait and self.state != 'present':
- self.module.fail_json(
- msg='The wait parameter is only supported when state is "present".'
- )
-
- def state_present(self):
- """
- Ensure that the target VLAN is present.
- """
-
- network_domain = self._get_network_domain()
-
- vlan = self._get_vlan(network_domain)
- if not vlan:
- if self.module.check_mode:
- self.module.exit_json(
- msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
- self.name, self.network_domain_selector
- ),
- changed=True
- )
-
- vlan = self._create_vlan(network_domain)
- self.module.exit_json(
- msg='Created VLAN "{0}" in network domain "{1}".'.format(
- self.name, self.network_domain_selector
- ),
- vlan=vlan_to_dict(vlan),
- changed=True
- )
- else:
- diff = VlanDiff(vlan, self.module.params)
- if not diff.has_changes():
- self.module.exit_json(
- msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
- self.name, self.network_domain_selector
- ),
- vlan=vlan_to_dict(vlan),
- changed=False
- )
-
- return
-
- try:
- diff.ensure_legal_change()
- except InvalidVlanChangeError as invalid_vlan_change:
- self.module.fail_json(
- msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
- self.name, self.network_domain_selector, invalid_vlan_change
- )
- )
-
- if diff.needs_expand() and not self.allow_expand:
- self.module.fail_json(
- msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
- self.private_ipv4_prefix_size
- ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
- vlan.private_ipv4_range_size
- ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
- )
-
- if self.module.check_mode:
- self.module.exit_json(
- msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
- self.name, self.network_domain_selector
- ),
- vlan=vlan_to_dict(vlan),
- changed=True
- )
-
- if diff.needs_edit():
- vlan.name = self.name
- vlan.description = self.description
-
- self.driver.ex_update_vlan(vlan)
-
- if diff.needs_expand():
- vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
- self.driver.ex_expand_vlan(vlan)
-
- self.module.exit_json(
- msg='Updated VLAN "{0}" in network domain "{1}".'.format(
- self.name, self.network_domain_selector
- ),
- vlan=vlan_to_dict(vlan),
- changed=True
- )
-
- def state_readonly(self):
- """
- Read the target VLAN's state.
- """
-
- network_domain = self._get_network_domain()
-
- vlan = self._get_vlan(network_domain)
- if vlan:
- self.module.exit_json(
- vlan=vlan_to_dict(vlan),
- changed=False
- )
- else:
- self.module.fail_json(
- msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
- self.name, self.network_domain_selector
- )
- )
-
- def state_absent(self):
- """
- Ensure that the target VLAN is not present.
- """
-
- network_domain = self._get_network_domain()
-
- vlan = self._get_vlan(network_domain)
- if not vlan:
- self.module.exit_json(
- msg='VLAN "{0}" is absent from network domain "{1}".'.format(
- self.name, self.network_domain_selector
- ),
- changed=False
- )
-
- return
-
- if self.module.check_mode:
- self.module.exit_json(
- msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
- self.name, self.network_domain_selector
- ),
- vlan=vlan_to_dict(vlan),
- changed=True
- )
-
- self._delete_vlan(vlan)
-
- self.module.exit_json(
- msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
- self.name, self.network_domain_selector
- ),
- changed=True
- )
-
- def _get_vlan(self, network_domain):
- """
- Retrieve the target VLAN details from CloudControl.
-
- :param network_domain: The target network domain.
- :return: The VLAN, or None if the target VLAN was not found.
- :rtype: DimensionDataVlan
- """
-
- vlans = self.driver.ex_list_vlans(
- location=self.location,
- network_domain=network_domain
- )
- matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
- if matching_vlans:
- return matching_vlans[0]
-
- return None
-
- def _create_vlan(self, network_domain):
- vlan = self.driver.ex_create_vlan(
- network_domain,
- self.name,
- self.private_ipv4_base_address,
- self.description,
- self.private_ipv4_prefix_size
- )
-
- if self.wait:
- vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
-
- return vlan
-
- def _delete_vlan(self, vlan):
- try:
- self.driver.ex_delete_vlan(vlan)
-
- # Not currently supported for deletes due to a bug in libcloud (module will error out if "wait" is specified when "state" is not "present").
- if self.wait:
- self._wait_for_vlan_state(vlan, 'NOT_FOUND')
-
- except DimensionDataAPIException as api_exception:
- self.module.fail_json(
- msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
- vlan.id, api_exception.msg
- )
- )
-
- def _wait_for_vlan_state(self, vlan, state_to_wait_for):
- network_domain = self._get_network_domain()
-
- wait_poll_interval = self.module.params['wait_poll_interval']
- wait_time = self.module.params['wait_time']
-
- # Bizarre bug in libcloud when checking status after delete; socket.error is too generic to catch in this context so for now we don't even try.
-
- try:
- return self.driver.connection.wait_for_state(
- state_to_wait_for,
- self.driver.ex_get_vlan,
- wait_poll_interval,
- wait_time,
- vlan
- )
-
- except DimensionDataAPIException as api_exception:
- if api_exception.code != 'RESOURCE_NOT_FOUND':
- raise
-
- return DimensionDataVlan(
- id=vlan.id,
- status='NOT_FOUND',
- name='',
- description='',
- private_ipv4_range_address='',
- private_ipv4_range_size=0,
- ipv4_gateway='',
- ipv6_range_address='',
- ipv6_range_size=0,
- ipv6_gateway='',
- location=self.location,
- network_domain=network_domain
- )
-
- def _get_network_domain(self):
- """
- Retrieve the target network domain from the Cloud Control API.
-
- :return: The network domain.
- """
-
- try:
- return self.get_network_domain(
- self.network_domain_selector, self.location
- )
- except UnknownNetworkError:
- self.module.fail_json(
- msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
- self.network_domain_selector, self.location
- )
- )
-
- return None
-
-
-class InvalidVlanChangeError(Exception):
- """
- Error raised when an illegal change to VLAN state is attempted.
- """
-
- pass
-
-
-class VlanDiff(object):
- """
- Represents differences between VLAN information (from CloudControl) and module parameters.
- """
-
- def __init__(self, vlan, module_params):
- """
-
- :param vlan: The VLAN information from CloudControl.
- :type vlan: DimensionDataVlan
- :param module_params: The module parameters.
- :type module_params: dict
- """
-
- self.vlan = vlan
- self.module_params = module_params
-
- self.name_changed = module_params['name'] != vlan.name
- self.description_changed = module_params['description'] != vlan.description
- self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
- self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
-
- # Is configured prefix size greater than or less than the actual prefix size?
- private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
- self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
- self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
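-
- # Worked example (illustrative): an existing VLAN with
- # private_ipv4_range_size 24 and a configured prefix size of 23 gives a
- # difference of -1; the prefix size decreased, so the network must be
- # expanded (reducing the prefix by one bit doubles the address space).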
-
- def has_changes(self):
- """
- Does the VlanDiff represent any changes between the VLAN and module configuration?
-
- :return: True, if there are changes; otherwise, False.
- """
-
- return self.needs_edit() or self.needs_expand()
-
- def ensure_legal_change(self):
- """
- Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
-
- - private_ipv4_base_address cannot be changed
- - private_ipv4_prefix_size must be less than or equal to the VLAN's existing private_ipv4_range_size (the network can only be expanded)
-
- :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
- """
-
- # Cannot change base address for private IPv4 network.
- if self.private_ipv4_base_address_changed:
- raise InvalidVlanChangeError('Cannot change the private IPV4 base address for an existing VLAN.')
-
- # Cannot shrink private IPv4 network (by increasing prefix size).
- if self.private_ipv4_prefix_size_increased:
- raise InvalidVlanChangeError('Cannot shrink the private IPV4 network for an existing VLAN (only expand is supported).')
-
- def needs_edit(self):
- """
- Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
-
- :return: True, if an Edit operation is required; otherwise, False.
- """
-
- return self.name_changed or self.description_changed
-
- def needs_expand(self):
- """
- Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
-
- The VLAN's network is expanded by reducing the size of its network prefix.
-
- :return: True, if an Expand operation is required; otherwise, False.
- """
-
- return self.private_ipv4_prefix_size_decreased
-
-
-def vlan_to_dict(vlan):
- return {
- 'id': vlan.id,
- 'name': vlan.name,
- 'description': vlan.description,
- 'location': vlan.location.id,
- 'private_ipv4_base_address': vlan.private_ipv4_range_address,
- 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
- 'private_ipv4_gateway_address': vlan.ipv4_gateway,
- 'ipv6_base_address': vlan.ipv6_range_address,
- 'ipv6_prefix_size': vlan.ipv6_range_size,
- 'ipv6_gateway_address': vlan.ipv6_gateway,
- 'status': vlan.status
- }
-
-
-def main():
- module = DimensionDataVlanModule()
-
- if module.state == 'present':
- module.state_present()
- elif module.state == 'readonly':
- module.state_readonly()
- elif module.state == 'absent':
- module.state_absent()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
deleted file mode 100644
index bbc34fdb..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/heroku/heroku_collaborator.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: heroku_collaborator
-short_description: "Add or delete app collaborators on Heroku"
-description:
- - Manages collaborators for Heroku apps.
- - If set to C(present) and the Heroku user is already a collaborator, then do nothing.
- - If set to C(present) and the Heroku user is not a collaborator, then add the user to the app.
- - If set to C(absent) and the Heroku user is a collaborator, then remove the user from the app.
-author:
- - Marcel Arns (@marns93)
-requirements:
- - heroku3
-options:
- api_key:
- type: str
- description:
- - Heroku API key
- apps:
- type: list
- elements: str
- description:
- - List of Heroku App names
- required: true
- suppress_invitation:
- description:
- - Suppress email invitation when creating collaborator
- type: bool
- default: "no"
- user:
- type: str
- description:
- - User ID or e-mail
- required: true
- state:
- type: str
- description:
- - Create or remove the Heroku collaborator
- choices: ["present", "absent"]
- default: "present"
-notes:
- - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
- - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
-'''
-
-EXAMPLES = '''
-- name: Create a heroku collaborator
- community.general.heroku_collaborator:
- api_key: YOUR_API_KEY
- user: max.mustermann@example.com
- apps: heroku-example-app
- state: present
-
-- name: An example of using the module in a loop
- community.general.heroku_collaborator:
- api_key: YOUR_API_KEY
- user: '{{ item.user }}'
- apps: '{{ item.apps | default(apps) }}'
- suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
- state: '{{ item.state | default("present") }}'
- with_items:
- - { user: 'a.b@example.com' }
- - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
- - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
-'''
-
-RETURN = ''' # '''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
-
-
-def add_or_delete_heroku_collaborator(module, client):
- user = module.params['user']
- state = module.params['state']
- affected_apps = []
- result_state = False
-
- for app in module.params['apps']:
- if app not in client.apps():
- module.fail_json(msg='App {0} does not exist'.format(app))
-
- heroku_app = client.apps()[app]
-
- heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
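-
- # Membership is decided by comparing the requested user against the
- # e-mail addresses of the app's current collaborators.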
-
- if state == 'absent' and user in heroku_collaborator_list:
- if not module.check_mode:
- heroku_app.remove_collaborator(user)
- affected_apps += [app]
- result_state = True
- elif state == 'present' and user not in heroku_collaborator_list:
- if not module.check_mode:
- heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
- affected_apps += [app]
- result_state = True
-
- return result_state, affected_apps
-
-
-def main():
- argument_spec = HerokuHelper.heroku_argument_spec()
- argument_spec.update(
- user=dict(required=True, type='str'),
- apps=dict(required=True, type='list', elements='str'),
- suppress_invitation=dict(default=False, type='bool'),
- state=dict(default='present', type='str', choices=['present', 'absent']),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- client = HerokuHelper(module).get_heroku_client()
-
- has_changed, msg = add_or_delete_heroku_collaborator(module, client)
- module.exit_json(changed=has_changed, msg=msg)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
deleted file mode 100644
index 3d4ba84b..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_ecs_instance.py
+++ /dev/null
@@ -1,2135 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_ecs_instance
-description:
- - instance management.
-short_description: Creates a resource of Ecs/Instance in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeouts for create operation.
- type: str
- default: '30m'
- update:
- description:
- - The timeouts for update operation.
- type: str
- default: '30m'
- delete:
- description:
- - The timeouts for delete operation.
- type: str
- default: '30m'
- availability_zone:
- description:
- - Specifies the name of the AZ where the ECS is located.
- type: str
- required: true
- flavor_name:
- description:
- - Specifies the name of the system flavor.
- type: str
- required: true
- image_id:
- description:
- - Specifies the ID of the system image.
- type: str
- required: true
- name:
- description:
- - Specifies the ECS name. Value requirements: consists of 1 to 64
- characters, including letters, digits, underscores C(_), hyphens
- (-), and periods (.).
- type: str
- required: true
- nics:
- description:
- - Specifies the NIC information of the ECS. Constraints: the
- network of the NIC must belong to the VPC specified by vpc_id. A
- maximum of 12 NICs can be attached to an ECS.
- type: list
- elements: dict
- required: true
- suboptions:
- ip_address:
- description:
- - Specifies the IP address of the NIC. The value is an IPv4
- address. Its value must be an unused IP
- address in the network segment of the subnet.
- type: str
- required: true
- subnet_id:
- description:
- - Specifies the ID of subnet.
- type: str
- required: true
- root_volume:
- description:
- - Specifies the configuration of the ECS's system disks.
- type: dict
- required: true
- suboptions:
- volume_type:
- description:
- - Specifies the ECS system disk type.
- - SATA is the common I/O disk type.
- - SAS is the high I/O disk type.
- - SSD is the ultra-high I/O disk type.
- - co-p1 is the high I/O (performance-optimized I) disk type.
- - uh-l1 is the ultra-high I/O (latency-optimized) disk type.
- - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
- disks. For other ECSs, do not use co-p1 or uh-l1 disks.
- type: str
- required: true
- size:
- description:
- - Specifies the system disk size, in GB. The value range is
- 1 to 1024. The system disk size must be
- greater than or equal to the minimum system disk size
- supported by the image (min_disk attribute of the image).
- If this parameter is not specified or is set to 0, the
- default system disk size is the minimum value of the
- system disk in the image (min_disk attribute of the
- image).
- type: int
- required: false
- snapshot_id:
- description:
- - Specifies the snapshot ID or ID of the original data disk
- contained in the full-ECS image.
- type: str
- required: false
- vpc_id:
- description:
- - Specifies the ID of the VPC to which the ECS belongs.
- type: str
- required: true
- admin_pass:
- description:
- - Specifies the initial login password of the administrator account
- for logging in to an ECS using password authentication. The Linux
- administrator is root, and the Windows administrator is
- Administrator. Password complexity requirements: consists of 8 to
- 26 characters. The password must contain at least three of the
- following character types 'uppercase letters, lowercase letters,
- digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
- cannot contain the username or the username in reverse. The
- Windows ECS password cannot contain the username, the username in
- reverse, or more than two consecutive characters in the username.
- type: str
- required: false
- data_volumes:
- description:
- - Specifies the data disks of ECS instance.
- type: list
- elements: dict
- required: false
- suboptions:
- volume_id:
- description:
- - Specifies the disk ID.
- type: str
- required: true
- device:
- description:
- - Specifies the disk device name.
- type: str
- required: false
- description:
- description:
- - Specifies the description of an ECS, which is a null string by
- default. Can contain a maximum of 85 characters. Cannot contain
- special characters, such as < and >.
- type: str
- required: false
- eip_id:
- description:
- - Specifies the ID of the elastic IP address assigned to the ECS.
- Only elastic IP addresses in the DOWN state can be
- assigned.
- type: str
- required: false
- enable_auto_recovery:
- description:
- - Specifies whether automatic recovery is enabled on the ECS.
- type: bool
- required: false
- enterprise_project_id:
- description:
- - Specifies the ID of the enterprise project to which the ECS
- belongs.
- type: str
- required: false
- security_groups:
- description:
- - Specifies the security groups of the ECS. If this
- parameter is left blank, the default security group is bound to
- the ECS by default.
- type: list
- elements: str
- required: false
- server_metadata:
- description:
- - Specifies the metadata of ECS to be created.
- type: dict
- required: false
- server_tags:
- description:
- - Specifies the tags of an ECS. When you create ECSs, one ECS
- supports up to 10 tags.
- type: dict
- required: false
- ssh_key_name:
- description:
- - Specifies the name of the SSH key used for logging in to the ECS.
- type: str
- required: false
- user_data:
- description:
- - Specifies the user data to be injected during the ECS creation
- process. Text, text files, and gzip files can be injected.
- The content to be injected must be encoded with
- base64. The maximum size of the content to be injected (before
- encoding) is 32 KB. For Linux ECSs, this parameter does not take
- effect when adminPass is used.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create an ecs instance
-- name: Create a vpc
- hwc_network_vpc:
- cidr: "192.168.100.0/24"
- name: "ansible_network_vpc_test"
- register: vpc
-- name: Create a subnet
- hwc_vpc_subnet:
- gateway_ip: "192.168.100.32"
- name: "ansible_network_subnet_test"
- dhcp_enable: true
- vpc_id: "{{ vpc.id }}"
- cidr: "192.168.100.0/26"
- register: subnet
-- name: Create a eip
- hwc_vpc_eip:
- dedicated_bandwidth:
- charge_mode: "traffic"
- name: "ansible_test_dedicated_bandwidth"
- size: 1
- type: "5_bgp"
- register: eip
-- name: Create a disk
- hwc_evs_disk:
- availability_zone: "cn-north-1a"
- name: "ansible_evs_disk_test"
- volume_type: "SATA"
- size: 10
- register: disk
-- name: Create an instance
- community.general.hwc_ecs_instance:
- data_volumes:
- - volume_id: "{{ disk.id }}"
- enable_auto_recovery: false
- eip_id: "{{ eip.id }}"
- name: "ansible_ecs_instance_test"
- availability_zone: "cn-north-1a"
- nics:
- - subnet_id: "{{ subnet.id }}"
- ip_address: "192.168.100.33"
- - subnet_id: "{{ subnet.id }}"
- ip_address: "192.168.100.34"
- server_tags:
- my_server: "my_server"
- image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
- flavor_name: "s3.small.1"
- vpc_id: "{{ vpc.id }}"
- root_volume:
- volume_type: "SAS"
-'''
-
-RETURN = '''
- availability_zone:
- description:
- - Specifies the name of the AZ where the ECS is located.
- type: str
- returned: success
- flavor_name:
- description:
- - Specifies the name of the system flavor.
- type: str
- returned: success
- image_id:
- description:
- - Specifies the ID of the system image.
- type: str
- returned: success
- name:
- description:
- - Specifies the ECS name. Value requirements "Consists of 1 to 64
- characters, including letters, digits, underscores C(_), hyphens
- (-), periods (.)".
- type: str
- returned: success
- nics:
- description:
- - Specifies the NIC information of the ECS. The
- network of the NIC must belong to the VPC specified by vpc_id. A
- maximum of 12 NICs can be attached to an ECS.
- type: list
- returned: success
- contains:
- ip_address:
- description:
- - Specifies the IP address of the NIC. The value is an IPv4
- address. Its value must be an unused IP
- address in the network segment of the subnet.
- type: str
- returned: success
- subnet_id:
- description:
- - Specifies the ID of subnet.
- type: str
- returned: success
- port_id:
- description:
- - Specifies the port ID corresponding to the IP address.
- type: str
- returned: success
- root_volume:
- description:
- - Specifies the configuration of the ECS's system disks.
- type: dict
- returned: success
- contains:
- volume_type:
- description:
- - Specifies the ECS system disk type.
- - SATA is the common I/O disk type.
- - SAS is the high I/O disk type.
- - SSD is the ultra-high I/O disk type.
- - co-p1 is the high I/O (performance-optimized I) disk type.
- - uh-l1 is the ultra-high I/O (latency-optimized) disk type.
- - NOTE: For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1
- disks. For other ECSs, do not use co-p1 or uh-l1 disks.
- type: str
- returned: success
- size:
- description:
- - Specifies the system disk size, in GB. The value range is
- 1 to 1024. The system disk size must be
- greater than or equal to the minimum system disk size
- supported by the image (min_disk attribute of the image).
- If this parameter is not specified or is set to 0, the
- default system disk size is the minimum value of the
- system disk in the image (min_disk attribute of the
- image).
- type: int
- returned: success
- snapshot_id:
- description:
- - Specifies the snapshot ID or ID of the original data disk
- contained in the full-ECS image.
- type: str
- returned: success
- device:
- description:
- - Specifies the disk device name.
- type: str
- returned: success
- volume_id:
- description:
- - Specifies the disk ID.
- type: str
- returned: success
- vpc_id:
- description:
- - Specifies the ID of the VPC to which the ECS belongs.
- type: str
- returned: success
- admin_pass:
- description:
- - Specifies the initial login password of the administrator account
- for logging in to an ECS using password authentication. The Linux
- administrator is root, and the Windows administrator is
- Administrator. Password complexity requirements: consists of 8 to
- 26 characters. The password must contain at least three of the
- following character types "uppercase letters, lowercase letters,
- digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
- cannot contain the username or the username in reverse. The
- Windows ECS password cannot contain the username, the username in
- reverse, or more than two consecutive characters in the username.
- type: str
- returned: success
- data_volumes:
- description:
- - Specifies the data disks of ECS instance.
- type: list
- returned: success
- contains:
- volume_id:
- description:
- - Specifies the disk ID.
- type: str
- returned: success
- device:
- description:
- - Specifies the disk device name.
- type: str
- returned: success
- description:
- description:
- - Specifies the description of an ECS, which is a null string by
- default. Can contain a maximum of 85 characters. Cannot contain
- special characters, such as < and >.
- type: str
- returned: success
- eip_id:
- description:
- - Specifies the ID of the elastic IP address assigned to the ECS.
- Only elastic IP addresses in the DOWN state can be assigned.
- type: str
- returned: success
- enable_auto_recovery:
- description:
- - Specifies whether automatic recovery is enabled on the ECS.
- type: bool
- returned: success
- enterprise_project_id:
- description:
- - Specifies the ID of the enterprise project to which the ECS
- belongs.
- type: str
- returned: success
- security_groups:
- description:
- - Specifies the security groups of the ECS. If this parameter is left
- blank, the default security group is bound to the ECS by default.
- type: list
- returned: success
- server_metadata:
- description:
- - Specifies the metadata of ECS to be created.
- type: dict
- returned: success
- server_tags:
- description:
- - Specifies the tags of an ECS. When you create ECSs, one ECS
- supports up to 10 tags.
- type: dict
- returned: success
- ssh_key_name:
- description:
- - Specifies the name of the SSH key used for logging in to the ECS.
- type: str
- returned: success
- user_data:
- description:
- - Specifies the user data to be injected during the ECS creation
- process. Text, text files, and gzip files can be injected.
- The content to be injected must be encoded with base64. The maximum
- size of the content to be injected (before encoding) is 32 KB. For
- Linux ECSs, this parameter does not take effect when adminPass is
- used.
- type: str
- returned: success
- config_drive:
- description:
- - Specifies the configuration driver.
- type: str
- returned: success
- created:
- description:
- - Specifies the time when an ECS was created.
- type: str
- returned: success
- disk_config_type:
- description:
- - Specifies the disk configuration type. MANUAL: the image
- space is not expanded. AUTO: the image space of the system disk
- will be expanded to the same size as the flavor.
- type: str
- returned: success
- host_name:
- description:
- - Specifies the host name of the ECS.
- type: str
- returned: success
- image_name:
- description:
- - Specifies the image name of the ECS.
- type: str
- returned: success
- power_state:
- description:
- - Specifies the power status of the ECS.
- type: int
- returned: success
- server_alias:
- description:
- - Specifies the ECS alias.
- type: str
- returned: success
- status:
- description:
- - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
- REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
- and DELETED.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='30m', type='str'),
- update=dict(default='30m', type='str'),
- delete=dict(default='30m', type='str'),
- ), default=dict()),
- availability_zone=dict(type='str', required=True),
- flavor_name=dict(type='str', required=True),
- image_id=dict(type='str', required=True),
- name=dict(type='str', required=True),
- nics=dict(
- type='list', required=True, elements='dict',
- options=dict(
- ip_address=dict(type='str', required=True),
- subnet_id=dict(type='str', required=True)
- ),
- ),
- root_volume=dict(type='dict', required=True, options=dict(
- volume_type=dict(type='str', required=True),
- size=dict(type='int'),
- snapshot_id=dict(type='str')
- )),
- vpc_id=dict(type='str', required=True),
- admin_pass=dict(type='str', no_log=True),
- data_volumes=dict(type='list', elements='dict', options=dict(
- volume_id=dict(type='str', required=True),
- device=dict(type='str')
- )),
- description=dict(type='str'),
- eip_id=dict(type='str'),
- enable_auto_recovery=dict(type='bool'),
- enterprise_project_id=dict(type='str'),
- security_groups=dict(type='list', elements='str'),
- server_metadata=dict(type='dict'),
- server_tags=dict(type='dict'),
- ssh_key_name=dict(type='str'),
- user_data=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "ecs")
-
- try:
- _init(config)
- is_exist = module.params['id']
-
- result = None
- changed = False
- if module.params['state'] == 'present':
- if not is_exist:
- if not module.check_mode:
- create(config)
- changed = True
-
- inputv = user_input_parameters(module)
- resp, array_index = read_resource(config)
- result = build_state(inputv, resp, array_index)
- set_readonly_options(inputv, result)
- if are_different_dicts(inputv, result):
- if not module.check_mode:
- update(config, inputv, result)
-
- inputv = user_input_parameters(module)
- resp, array_index = read_resource(config)
- result = build_state(inputv, resp, array_index)
- set_readonly_options(inputv, result)
- if are_different_dicts(inputv, result):
- raise Exception("Update resource failed, "
- "some attributes are not updated")
-
- changed = True
-
- result['id'] = module.params.get('id')
- else:
- result = dict()
- if is_exist:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def _init(config):
- module = config.module
- if module.params['id']:
- return
-
- v = search_resource(config)
- n = len(v)
- if n > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"])
- for i in v
- ]))
-
- if n == 1:
- module.params['id'] = navigate_value(v[0], ["id"])
-
-
-def user_input_parameters(module):
- return {
- "admin_pass": module.params.get("admin_pass"),
- "availability_zone": module.params.get("availability_zone"),
- "data_volumes": module.params.get("data_volumes"),
- "description": module.params.get("description"),
- "eip_id": module.params.get("eip_id"),
- "enable_auto_recovery": module.params.get("enable_auto_recovery"),
- "enterprise_project_id": module.params.get("enterprise_project_id"),
- "flavor_name": module.params.get("flavor_name"),
- "image_id": module.params.get("image_id"),
- "name": module.params.get("name"),
- "nics": module.params.get("nics"),
- "root_volume": module.params.get("root_volume"),
- "security_groups": module.params.get("security_groups"),
- "server_metadata": module.params.get("server_metadata"),
- "server_tags": module.params.get("server_tags"),
- "ssh_key_name": module.params.get("ssh_key_name"),
- "user_data": module.params.get("user_data"),
- "vpc_id": module.params.get("vpc_id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "ecs", "project")
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
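- # e.g. the default '30m' becomes 30 * 60 = 1800 seconds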
- opts = user_input_parameters(module)
- opts["ansible_module"] = module
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- obj = async_wait(config, r, client, timeout)
-
- sub_job_identity = {
- "job_type": "createSingleServer",
- }
- for item in navigate_value(obj, ["entities", "sub_jobs"]):
- for k, v in sub_job_identity.items():
- if item[k] != v:
- break
- else:
- obj = item
- break
- else:
- raise Exception("Can't find the sub job")
- module.params['id'] = navigate_value(obj, ["entities", "server_id"])
-
-
-def update(config, expect_state, current_state):
- module = config.module
- expect_state["current_state"] = current_state
- current_state["current_state"] = current_state
- timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
- client = config.client(get_region(module), "ecs", "project")
-
- params = build_delete_nics_parameters(expect_state)
- params1 = build_delete_nics_parameters(current_state)
- if params and are_different_dicts(params, params1):
- r = send_delete_nics_request(module, params, client)
- async_wait(config, r, client, timeout)
-
- params = build_set_auto_recovery_parameters(expect_state)
- params1 = build_set_auto_recovery_parameters(current_state)
- if params and are_different_dicts(params, params1):
- send_set_auto_recovery_request(module, params, client)
-
- params = build_attach_nics_parameters(expect_state)
- params1 = build_attach_nics_parameters(current_state)
- if params and are_different_dicts(params, params1):
- r = send_attach_nics_request(module, params, client)
- async_wait(config, r, client, timeout)
-
- multi_invoke_delete_volume(config, expect_state, client, timeout)
-
- multi_invoke_attach_data_disk(config, expect_state, client, timeout)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "ecs", "project")
- timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
-
- opts = user_input_parameters(module)
- opts["ansible_module"] = module
-
- params = build_delete_parameters(opts)
- if params:
- r = send_delete_request(module, params, client)
- async_wait(config, r, client, timeout)
-
-
-def read_resource(config):
- module = config.module
- client = config.client(get_region(module), "ecs", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- preprocess_read_response(r)
- res["read"] = fill_read_resp_body(r)
-
- r = send_read_auto_recovery_request(module, client)
- res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
-
- return res, None
-
-
-def preprocess_read_response(resp):
- v = resp.get("os-extended-volumes:volumes_attached")
- if v and isinstance(v, list):
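-        # Split the boot volume (bootIndex "0") out of the attached-volume
-        # list via swap-with-last-and-pop, an O(1) removal that does not
-        # preserve order.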
- for i in range(len(v)):
- if v[i].get("bootIndex") == "0":
- root_volume = v[i]
-
- if (i + 1) != len(v):
- v[i] = v[-1]
-
- v.pop()
-
- resp["root_volume"] = root_volume
- break
-
- v = resp.get("addresses")
- if v:
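-        # Regroup the per-network address lists by port: fixed IPs are keyed
-        # by port_id, then each floating IP is folded onto the matching port
-        # as eip_address (or kept as its own entry when no fixed IP shares
-        # the port).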
- rv = {}
- eips = []
- for val in v.values():
- for item in val:
- if item["OS-EXT-IPS:type"] == "floating":
- eips.append(item)
- else:
- rv[item["OS-EXT-IPS:port_id"]] = item
-
- for item in eips:
- k = item["OS-EXT-IPS:port_id"]
- if k in rv:
- rv[k]["eip_address"] = item.get("addr", "")
- else:
- rv[k] = item
- item["eip_address"] = item.get("addr", "")
- item["addr"] = ""
-
- resp["address"] = rv.values()
-
-
-def build_state(opts, response, array_index):
- states = flatten_options(response, array_index)
- set_unreadable_options(opts, states)
- adjust_options(opts, states)
- return states
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["enterprise_project_id"])
- if v or v in [False, 0]:
- query_params.append(
- "enterprise_project_id=" + (str(v) if v else str(v).lower()))
-
- v = navigate_value(opts, ["name"])
- if v or v in [False, 0]:
- query_params.append(
- "name=" + (str(v) if v else str(v).lower()))
-
- query_link = "?limit=10&offset={offset}"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "ecs", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "cloudservers/detail" + query_link
-
- result = []
- p = {'offset': 1}
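-    # Page through the list API (10 items per page; offset is treated as a
-    # 1-based page index) until a page comes back empty or more than one
-    # candidate has been collected.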
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- adjust_list_resp(identity_obj, item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['offset'] += 1
-
- return result
-
-
-def build_delete_nics_parameters(opts):
- params = dict()
-
- v = expand_delete_nics_nics(opts, None)
- if not is_empty_value(v):
- params["nics"] = v
-
- return params
-
-
-def expand_delete_nics_nics(d, array_index):
- cv = d["current_state"].get("nics")
- if not cv:
- return None
-
- val = cv
-
- ev = d.get("nics")
- if ev:
- m = [item.get("ip_address") for item in ev]
- val = [item for item in cv if item.get("ip_address") not in m]
-
- r = []
- for item in val:
- transformed = dict()
-
- v = item.get("port_id")
- if not is_empty_value(v):
- transformed["id"] = v
-
- if transformed:
- r.append(transformed)
-
- return r
-
-
-def send_delete_nics_request(module, params, client):
- url = build_path(module, "cloudservers/{id}/nics/delete")
-
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(delete_nics), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_set_auto_recovery_parameters(opts):
- params = dict()
-
- v = expand_set_auto_recovery_support_auto_recovery(opts, None)
- if v is not None:
- params["support_auto_recovery"] = v
-
- return params
-
-
-def expand_set_auto_recovery_support_auto_recovery(d, array_index):
- v = navigate_value(d, ["enable_auto_recovery"], None)
- return None if v is None else str(v).lower()
-
-
-def send_set_auto_recovery_request(module, params, client):
- url = build_path(module, "cloudservers/{id}/autorecovery")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(set_auto_recovery), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["admin_pass"], None)
- if not is_empty_value(v):
- params["adminPass"] = v
-
- v = navigate_value(opts, ["availability_zone"], None)
- if not is_empty_value(v):
- params["availability_zone"] = v
-
- v = navigate_value(opts, ["description"], None)
- if not is_empty_value(v):
- params["description"] = v
-
- v = expand_create_extendparam(opts, None)
- if not is_empty_value(v):
- params["extendparam"] = v
-
- v = navigate_value(opts, ["flavor_name"], None)
- if not is_empty_value(v):
- params["flavorRef"] = v
-
- v = navigate_value(opts, ["image_id"], None)
- if not is_empty_value(v):
- params["imageRef"] = v
-
- v = navigate_value(opts, ["ssh_key_name"], None)
- if not is_empty_value(v):
- params["key_name"] = v
-
- v = navigate_value(opts, ["server_metadata"], None)
- if not is_empty_value(v):
- params["metadata"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = expand_create_nics(opts, None)
- if not is_empty_value(v):
- params["nics"] = v
-
- v = expand_create_publicip(opts, None)
- if not is_empty_value(v):
- params["publicip"] = v
-
- v = expand_create_root_volume(opts, None)
- if not is_empty_value(v):
- params["root_volume"] = v
-
- v = expand_create_security_groups(opts, None)
- if not is_empty_value(v):
- params["security_groups"] = v
-
- v = expand_create_server_tags(opts, None)
- if not is_empty_value(v):
- params["server_tags"] = v
-
- v = navigate_value(opts, ["user_data"], None)
- if not is_empty_value(v):
- params["user_data"] = v
-
- v = navigate_value(opts, ["vpc_id"], None)
- if not is_empty_value(v):
- params["vpcid"] = v
-
- if not params:
- return params
-
- params = {"server": params}
-
- return params
-
-
-def expand_create_extendparam(d, array_index):
- r = dict()
-
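-    # chargingMode 0 corresponds to on-demand (post-paid) billing; the
-    # module always creates on-demand instances.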
- r["chargingMode"] = 0
-
- v = navigate_value(d, ["enterprise_project_id"], array_index)
- if not is_empty_value(v):
- r["enterprise_project_id"] = v
-
- v = navigate_value(d, ["enable_auto_recovery"], array_index)
- if not is_empty_value(v):
- r["support_auto_recovery"] = v
-
- return r
-
-
-def expand_create_nics(d, array_index):
- new_ai = dict()
- if array_index:
- new_ai.update(array_index)
-
- req = []
-
- v = navigate_value(
- d, ["nics"], new_ai)
-
- if not v:
- return req
- n = len(v)
- for i in range(n):
- new_ai["nics"] = i
- transformed = dict()
-
- v = navigate_value(d, ["nics", "ip_address"], new_ai)
- if not is_empty_value(v):
- transformed["ip_address"] = v
-
- v = navigate_value(d, ["nics", "subnet_id"], new_ai)
- if not is_empty_value(v):
- transformed["subnet_id"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def expand_create_publicip(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["eip_id"], array_index)
- if not is_empty_value(v):
- r["id"] = v
-
- return r
-
-
-def expand_create_root_volume(d, array_index):
- r = dict()
-
- v = expand_create_root_volume_extendparam(d, array_index)
- if not is_empty_value(v):
- r["extendparam"] = v
-
- v = navigate_value(d, ["root_volume", "size"], array_index)
- if not is_empty_value(v):
- r["size"] = v
-
- v = navigate_value(d, ["root_volume", "volume_type"], array_index)
- if not is_empty_value(v):
- r["volumetype"] = v
-
- return r
-
-
-def expand_create_root_volume_extendparam(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
- if not is_empty_value(v):
- r["snapshotId"] = v
-
- return r
-
-
-def expand_create_security_groups(d, array_index):
- v = d.get("security_groups")
- if not v:
- return None
-
- return [{"id": i} for i in v]
-
-
-def expand_create_server_tags(d, array_index):
- v = d.get("server_tags")
- if not v:
- return None
-
- return [{"key": k, "value": v1} for k, v1 in v.items()]
-
-
-def send_create_request(module, params, client):
- url = "cloudservers"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_attach_nics_parameters(opts):
- params = dict()
-
- v = expand_attach_nics_nics(opts, None)
- if not is_empty_value(v):
- params["nics"] = v
-
- return params
-
-
-def expand_attach_nics_nics(d, array_index):
- ev = d.get("nics")
- if not ev:
- return None
-
- val = ev
-
- cv = d["current_state"].get("nics")
- if cv:
- m = [item.get("ip_address") for item in cv]
- val = [item for item in ev if item.get("ip_address") not in m]
-
- r = []
- for item in val:
- transformed = dict()
-
- v = item.get("ip_address")
- if not is_empty_value(v):
- transformed["ip_address"] = v
-
- v = item.get("subnet_id")
- if not is_empty_value(v):
- transformed["subnet_id"] = v
-
- if transformed:
- r.append(transformed)
-
- return r
-
-
-def send_attach_nics_request(module, params, client):
- url = build_path(module, "cloudservers/{id}/nics")
-
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(attach_nics), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_volume_request(module, params, client, info):
- path_parameters = {
- "volume_id": ["volume_id"],
- }
- data = dict((key, navigate_value(info, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(delete_volume), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_attach_data_disk_parameters(opts, array_index):
- params = dict()
-
- v = expand_attach_data_disk_volume_attachment(opts, array_index)
- if not is_empty_value(v):
- params["volumeAttachment"] = v
-
- return params
-
-
-def expand_attach_data_disk_volume_attachment(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["data_volumes", "device"], array_index)
- if not is_empty_value(v):
- r["device"] = v
-
- v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
- if not is_empty_value(v):
- r["volumeId"] = v
-
- return r
-
-
-def send_attach_data_disk_request(module, params, client):
- url = build_path(module, "cloudservers/{id}/attachvolume")
-
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(attach_data_disk), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_delete_parameters(opts):
- params = dict()
-
- params["delete_publicip"] = False
-
- params["delete_volume"] = False
-
- v = expand_delete_servers(opts, None)
- if not is_empty_value(v):
- params["servers"] = v
-
- return params
-
-
-def expand_delete_servers(d, array_index):
- new_ai = dict()
- if array_index:
- new_ai.update(array_index)
-
- req = []
-
- n = 1
- for i in range(n):
- transformed = dict()
-
- v = expand_delete_servers_id(d, new_ai)
- if not is_empty_value(v):
- transformed["id"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def expand_delete_servers_id(d, array_index):
- return d["ansible_module"].params.get("id")
-
-
-def send_delete_request(module, params, client):
- url = "cloudservers/delete"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait(config, result, client, timeout):
- module = config.module
-
- url = build_path(module, "jobs/{job_id}", result)
-
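-    # Poll the job endpoint until it reports SUCCESS; RUNNING and INIT count
-    # as still pending, anything else (or a client error) fails the wait.
-    # wait_to_finish enforces the overall timeout.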
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["SUCCESS"],
- ["RUNNING", "INIT"],
- _query_status, timeout)
- except Exception as ex:
-        module.fail_json(msg="module(hwc_ecs_instance): error "
-                             "waiting for the job to finish, error: %s" % str(ex))
-
-
-def multi_invoke_delete_volume(config, opts, client, timeout):
- module = config.module
-
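-    # Detach every currently attached data volume whose volume_id no longer
-    # appears in the desired state, waiting for each detach job in turn.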
- opts1 = None
- expect = opts["data_volumes"]
- current = opts["current_state"]["data_volumes"]
- if expect and current:
- v = [i["volume_id"] for i in expect]
- opts1 = {
- "data_volumes": [
- i for i in current if i["volume_id"] not in v
- ]
- }
-
- loop_val = navigate_value(opts1, ["data_volumes"])
- if not loop_val:
- return
-
- for i in range(len(loop_val)):
- r = send_delete_volume_request(module, None, client, loop_val[i])
- async_wait(config, r, client, timeout)
-
-
-def multi_invoke_attach_data_disk(config, opts, client, timeout):
- module = config.module
-
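-    # Attach every desired data volume that is not yet attached, waiting
-    # for each attach job in turn.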
- opts1 = opts
- expect = opts["data_volumes"]
- current = opts["current_state"]["data_volumes"]
- if expect and current:
- v = [i["volume_id"] for i in current]
- opts1 = {
- "data_volumes": [
- i for i in expect if i["volume_id"] not in v
- ]
- }
-
- loop_val = navigate_value(opts1, ["data_volumes"])
- if not loop_val:
- return
-
- for i in range(len(loop_val)):
- params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
- r = send_attach_data_disk_request(module, params, client)
- async_wait(config, r, client, timeout)
-
-
-def send_read_request(module, client):
- url = build_path(module, "cloudservers/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["server"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
-
- result["OS-EXT-AZ:availability_zone"] = body.get(
- "OS-EXT-AZ:availability_zone")
-
- result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
-
- result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
- "OS-EXT-SRV-ATTR:instance_name")
-
- result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
-
- result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
-
- v = fill_read_resp_address(body.get("address"))
- result["address"] = v
-
- result["config_drive"] = body.get("config_drive")
-
- result["created"] = body.get("created")
-
- result["description"] = body.get("description")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- v = fill_read_resp_flavor(body.get("flavor"))
- result["flavor"] = v
-
- result["id"] = body.get("id")
-
- v = fill_read_resp_image(body.get("image"))
- result["image"] = v
-
- result["key_name"] = body.get("key_name")
-
- v = fill_read_resp_metadata(body.get("metadata"))
- result["metadata"] = v
-
- result["name"] = body.get("name")
-
- v = fill_read_resp_os_extended_volumes_volumes_attached(
- body.get("os-extended-volumes:volumes_attached"))
- result["os-extended-volumes:volumes_attached"] = v
-
- v = fill_read_resp_root_volume(body.get("root_volume"))
- result["root_volume"] = v
-
- result["status"] = body.get("status")
-
- result["tags"] = body.get("tags")
-
- return result
-
-
-def fill_read_resp_address(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
-
- val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
-
- val["addr"] = item.get("addr")
-
- result.append(val)
-
- return result
-
-
-def fill_read_resp_flavor(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-def fill_read_resp_image(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-def fill_read_resp_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["image_name"] = value.get("image_name")
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-def fill_read_resp_os_extended_volumes_volumes_attached(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["bootIndex"] = item.get("bootIndex")
-
- val["device"] = item.get("device")
-
- val["id"] = item.get("id")
-
- result.append(val)
-
- return result
-
-
-def fill_read_resp_root_volume(value):
- if not value:
- return None
-
- result = dict()
-
- result["device"] = value.get("device")
-
- result["id"] = value.get("id")
-
- return result
-
-
-def send_read_auto_recovery_request(module, client):
- url = build_path(module, "cloudservers/{id}/autorecovery")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(read_auto_recovery), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def fill_read_auto_recovery_resp_body(body):
- result = dict()
-
- result["support_auto_recovery"] = body.get("support_auto_recovery")
-
- return result
-
-
-def flatten_options(response, array_index):
- r = dict()
-
- v = navigate_value(
- response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
- r["availability_zone"] = v
-
- v = navigate_value(response, ["read", "config_drive"], array_index)
- r["config_drive"] = v
-
- v = navigate_value(response, ["read", "created"], array_index)
- r["created"] = v
-
- v = flatten_data_volumes(response, array_index)
- r["data_volumes"] = v
-
- v = navigate_value(response, ["read", "description"], array_index)
- r["description"] = v
-
- v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
- r["disk_config_type"] = v
-
- v = flatten_enable_auto_recovery(response, array_index)
- r["enable_auto_recovery"] = v
-
- v = navigate_value(
- response, ["read", "enterprise_project_id"], array_index)
- r["enterprise_project_id"] = v
-
- v = navigate_value(response, ["read", "flavor", "id"], array_index)
- r["flavor_name"] = v
-
- v = navigate_value(
- response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
- r["host_name"] = v
-
- v = navigate_value(response, ["read", "image", "id"], array_index)
- r["image_id"] = v
-
- v = navigate_value(
- response, ["read", "metadata", "image_name"], array_index)
- r["image_name"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- v = flatten_nics(response, array_index)
- r["nics"] = v
-
- v = navigate_value(
- response, ["read", "OS-EXT-STS:power_state"], array_index)
- r["power_state"] = v
-
- v = flatten_root_volume(response, array_index)
- r["root_volume"] = v
-
- v = navigate_value(
- response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
- r["server_alias"] = v
-
- v = flatten_server_tags(response, array_index)
- r["server_tags"] = v
-
- v = navigate_value(response, ["read", "key_name"], array_index)
- r["ssh_key_name"] = v
-
- v = navigate_value(response, ["read", "status"], array_index)
- r["status"] = v
-
- v = navigate_value(
- response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
- r["user_data"] = v
-
- v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
- r["vpc_id"] = v
-
- return r
-
-
-def flatten_data_volumes(d, array_index):
- v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
- array_index)
- if not v:
- return None
- n = len(v)
- result = []
-
- new_ai = dict()
- if array_index:
- new_ai.update(array_index)
-
- for i in range(n):
- new_ai["read.os-extended-volumes:volumes_attached"] = i
-
- val = dict()
-
- v = navigate_value(
- d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
- val["device"] = v
-
- v = navigate_value(
- d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
- val["volume_id"] = v
-
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if result else None
-
-
-def flatten_enable_auto_recovery(d, array_index):
- v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
- array_index)
- return v == "true"
-
-
-def flatten_nics(d, array_index):
- v = navigate_value(d, ["read", "address"],
- array_index)
- if not v:
- return None
- n = len(v)
- result = []
-
- new_ai = dict()
- if array_index:
- new_ai.update(array_index)
-
- for i in range(n):
- new_ai["read.address"] = i
-
- val = dict()
-
- v = navigate_value(d, ["read", "address", "addr"], new_ai)
- val["ip_address"] = v
-
- v = navigate_value(
- d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
- val["port_id"] = v
-
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if result else None
-
-
-def flatten_root_volume(d, array_index):
- result = dict()
-
- v = navigate_value(d, ["read", "root_volume", "device"], array_index)
- result["device"] = v
-
- v = navigate_value(d, ["read", "root_volume", "id"], array_index)
- result["volume_id"] = v
-
- for v in result.values():
- if v is not None:
- return result
- return None
-
-
-def flatten_server_tags(d, array_index):
- v = navigate_value(d, ["read", "tags"], array_index)
- if not v:
- return None
-
-    r = dict()
-    for item in v:
-        # Tags are returned as "key=value" strings; split on the first "="
-        # only and tolerate tags that carry no value.
-        k, _, val = item.partition("=")
-        r[k] = val
-    return r
-
-
-def adjust_options(opts, states):
- adjust_data_volumes(opts, states)
-
- adjust_nics(opts, states)
-
-
-def adjust_data_volumes(parent_input, parent_cur):
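-    # Reorder the current data_volumes to follow the input order (matched by
-    # volume_id), appending unmatched current entries, so that a pure
-    # ordering difference is not reported as a change.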
- iv = parent_input.get("data_volumes")
- if not (iv and isinstance(iv, list)):
- return
-
- cv = parent_cur.get("data_volumes")
- if not (cv and isinstance(cv, list)):
- return
-
- lcv = len(cv)
- result = []
- q = []
- for iiv in iv:
- if len(q) == lcv:
- break
-
- icv = None
- for j in range(lcv):
- if j in q:
- continue
-
- icv = cv[j]
-
- if iiv["volume_id"] != icv["volume_id"]:
- continue
-
- result.append(icv)
- q.append(j)
- break
- else:
- break
-
- if len(q) != lcv:
- for i in range(lcv):
- if i not in q:
- result.append(cv[i])
-
- if len(result) != lcv:
- raise Exception("adjust property(data_volumes) failed, "
- "the array number is not equal")
-
- parent_cur["data_volumes"] = result
-
-
-def adjust_nics(parent_input, parent_cur):
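-    # Same reordering as adjust_data_volumes, but nics are matched by
-    # ip_address.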
- iv = parent_input.get("nics")
- if not (iv and isinstance(iv, list)):
- return
-
- cv = parent_cur.get("nics")
- if not (cv and isinstance(cv, list)):
- return
-
- lcv = len(cv)
- result = []
- q = []
- for iiv in iv:
- if len(q) == lcv:
- break
-
- icv = None
- for j in range(lcv):
- if j in q:
- continue
-
- icv = cv[j]
-
- if iiv["ip_address"] != icv["ip_address"]:
- continue
-
- result.append(icv)
- q.append(j)
- break
- else:
- break
-
- if len(q) != lcv:
- for i in range(lcv):
- if i not in q:
- result.append(cv[i])
-
- if len(result) != lcv:
- raise Exception("adjust property(nics) failed, "
- "the array number is not equal")
-
- parent_cur["nics"] = result
-
-
-def set_unreadable_options(opts, states):
- states["admin_pass"] = opts.get("admin_pass")
-
- states["eip_id"] = opts.get("eip_id")
-
- set_unread_nics(
- opts.get("nics"), states.get("nics"))
-
- set_unread_root_volume(
- opts.get("root_volume"), states.get("root_volume"))
-
- states["security_groups"] = opts.get("security_groups")
-
- states["server_metadata"] = opts.get("server_metadata")
-
-
-def set_unread_nics(inputv, curv):
- if not (inputv and isinstance(inputv, list)):
- return
-
- if not (curv and isinstance(curv, list)):
- return
-
- lcv = len(curv)
- q = []
- for iv in inputv:
- if len(q) == lcv:
- break
-
- cv = None
- for j in range(lcv):
- if j in q:
- continue
-
- cv = curv[j]
-
- if iv["ip_address"] != cv["ip_address"]:
- continue
-
- q.append(j)
- break
- else:
- continue
-
- cv["subnet_id"] = iv.get("subnet_id")
-
-
-def set_unread_root_volume(inputv, curv):
- if not (inputv and isinstance(inputv, dict)):
- return
-
- if not (curv and isinstance(curv, dict)):
- return
-
- curv["size"] = inputv.get("size")
-
- curv["snapshot_id"] = inputv.get("snapshot_id")
-
- curv["volume_type"] = inputv.get("volume_type")
-
-
-def set_readonly_options(opts, states):
- opts["config_drive"] = states.get("config_drive")
-
- opts["created"] = states.get("created")
-
- opts["disk_config_type"] = states.get("disk_config_type")
-
- opts["host_name"] = states.get("host_name")
-
- opts["image_name"] = states.get("image_name")
-
- set_readonly_nics(
- opts.get("nics"), states.get("nics"))
-
- opts["power_state"] = states.get("power_state")
-
- set_readonly_root_volume(
- opts.get("root_volume"), states.get("root_volume"))
-
- opts["server_alias"] = states.get("server_alias")
-
- opts["status"] = states.get("status")
-
-
-def set_readonly_nics(inputv, curv):
- if not (curv and isinstance(curv, list)):
- return
-
- if not (inputv and isinstance(inputv, list)):
- return
-
- lcv = len(curv)
- q = []
- for iv in inputv:
- if len(q) == lcv:
- break
-
- cv = None
- for j in range(lcv):
- if j in q:
- continue
-
- cv = curv[j]
-
- if iv["ip_address"] != cv["ip_address"]:
- continue
-
- q.append(j)
- break
- else:
- continue
-
- iv["port_id"] = cv.get("port_id")
-
-
-def set_readonly_root_volume(inputv, curv):
- if not (inputv and isinstance(inputv, dict)):
- return
-
- if not (curv and isinstance(curv, dict)):
- return
-
- inputv["device"] = curv.get("device")
-
- inputv["volume_id"] = curv.get("volume_id")
-
-
-def send_list_request(module, client, url):
-    r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_ecs_instance): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["servers"], None)
-
-
-def _build_identity_object(all_opts):
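-    # Build the expected shape of a list-API item from the user's input;
-    # fields that cannot be derived from the input are left as None.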
- result = dict()
-
- result["OS-DCF:diskConfig"] = None
-
- v = navigate_value(all_opts, ["availability_zone"], None)
- result["OS-EXT-AZ:availability_zone"] = v
-
- result["OS-EXT-SRV-ATTR:hostname"] = None
-
- result["OS-EXT-SRV-ATTR:instance_name"] = None
-
- v = navigate_value(all_opts, ["user_data"], None)
- result["OS-EXT-SRV-ATTR:user_data"] = v
-
- result["OS-EXT-STS:power_state"] = None
-
- result["config_drive"] = None
-
- result["created"] = None
-
- v = navigate_value(all_opts, ["description"], None)
- result["description"] = v
-
- v = navigate_value(all_opts, ["enterprise_project_id"], None)
- result["enterprise_project_id"] = v
-
- v = expand_list_flavor(all_opts, None)
- result["flavor"] = v
-
- result["id"] = None
-
- v = expand_list_image(all_opts, None)
- result["image"] = v
-
- v = navigate_value(all_opts, ["ssh_key_name"], None)
- result["key_name"] = v
-
- v = expand_list_metadata(all_opts, None)
- result["metadata"] = v
-
- v = navigate_value(all_opts, ["name"], None)
- result["name"] = v
-
- result["status"] = None
-
- v = expand_list_tags(all_opts, None)
- result["tags"] = v
-
- return result
-
-
-def expand_list_flavor(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["flavor_name"], array_index)
- r["id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def expand_list_image(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["image_id"], array_index)
- r["id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def expand_list_metadata(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["vpc_id"], array_index)
- r["vpc_id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def expand_list_tags(d, array_index):
- v = d.get("server_tags")
- if not v:
- return None
-
- return [k + "=" + v1 for k, v1 in v.items()]
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
-
- result["OS-EXT-AZ:availability_zone"] = body.get(
- "OS-EXT-AZ:availability_zone")
-
- result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
-
- result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
- "OS-EXT-SRV-ATTR:instance_name")
-
- result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
-
- result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
-
- result["config_drive"] = body.get("config_drive")
-
- result["created"] = body.get("created")
-
- result["description"] = body.get("description")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- v = fill_list_resp_flavor(body.get("flavor"))
- result["flavor"] = v
-
- result["id"] = body.get("id")
-
- v = fill_list_resp_image(body.get("image"))
- result["image"] = v
-
- result["key_name"] = body.get("key_name")
-
- v = fill_list_resp_metadata(body.get("metadata"))
- result["metadata"] = v
-
- result["name"] = body.get("name")
-
- result["status"] = body.get("status")
-
- result["tags"] = body.get("tags")
-
- return result
-
-
-def fill_list_resp_flavor(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-def fill_list_resp_image(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-def fill_list_resp_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-def adjust_list_resp(opts, resp):
- adjust_list_api_tags(opts, resp)
-
-
-def adjust_list_api_tags(parent_input, parent_cur):
- iv = parent_input.get("tags")
- if not (iv and isinstance(iv, list)):
- return
-
- cv = parent_cur.get("tags")
- if not (cv and isinstance(cv, list)):
- return
-
- result = []
- for iiv in iv:
- if iiv not in cv:
- break
-
- result.append(iiv)
-
- j = cv.index(iiv)
- cv[j] = cv[-1]
- cv.pop()
-
- if cv:
- result.extend(cv)
- parent_cur["tags"] = result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
deleted file mode 100644
index 4aec1b94..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_evs_disk.py
+++ /dev/null
@@ -1,1210 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_evs_disk
-description:
-    - Block storage management.
-short_description: Creates a resource of Evs/Disk in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
-            - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
-            - The timeout for each operation.
- type: dict
- suboptions:
- create:
- description:
-                    - The timeout for the create operation.
- type: str
- default: '30m'
- update:
- description:
-                    - The timeout for the update operation.
- type: str
- default: '30m'
- delete:
- description:
-                    - The timeout for the delete operation.
- type: str
- default: '30m'
- availability_zone:
- description:
- - Specifies the AZ where you want to create the disk.
- type: str
- required: true
- name:
- description:
- - Specifies the disk name. The value can contain a maximum of 255
- bytes.
- type: str
- required: true
- volume_type:
- description:
- - Specifies the disk type. Currently, the value can be SSD, SAS, or
- SATA.
- - SSD specifies the ultra-high I/O disk type.
- - SAS specifies the high I/O disk type.
- - SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the
- disk will fail to create. If the EVS disk is created from a
- snapshot, the volume_type field must be the same as that of the
- snapshot's source disk.
- type: str
- required: true
- backup_id:
- description:
- - Specifies the ID of the backup that can be used to create a disk.
- This parameter is mandatory when you use a backup to create the
- disk.
- type: str
- required: false
- description:
- description:
- - Specifies the disk description. The value can contain a maximum
- of 255 bytes.
- type: str
- required: false
- enable_full_clone:
- description:
- - If the disk is created from a snapshot and linked cloning needs
- to be used, set this parameter to True.
- type: bool
- required: false
- enable_scsi:
- description:
- - If this parameter is set to True, the disk device type will be
- SCSI, which allows ECS OSs to directly access underlying storage
- media. SCSI reservation command is supported. If this parameter
- is set to False, the disk device type will be VBD, which supports
- only simple SCSI read/write commands.
- - If parameter enable_share is set to True and this parameter
- is not specified, shared SCSI disks are created. SCSI EVS disks
- cannot be created from backups, which means that this parameter
- cannot be True if backup_id has been specified.
- type: bool
- required: false
- enable_share:
- description:
- - Specifies whether the disk is shareable. The default value is
- False.
- type: bool
- required: false
- encryption_id:
- description:
-            - Specifies the encryption ID. Its length is fixed at 36 bytes.
- type: str
- required: false
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID. This ID is associated with
- the disk during the disk creation. If it is not specified, the
- disk is bound to the default enterprise project.
- type: str
- required: false
- image_id:
- description:
- - Specifies the image ID. If this parameter is specified, the disk
- is created from an image. BMS system disks cannot be
- created from BMS images.
- type: str
- required: false
- size:
- description:
-            - Specifies the disk size, in GB. The value ranges from 1 GB to
-              1024 GB for a system disk and from 10 GB to 32768 GB for a data
-              disk. This parameter is mandatory when you create an empty disk
-              or create a disk from an image or a snapshot; in that case the
-              disk size must be greater than or equal to the image or
-              snapshot size. It is optional when you create a disk from a
-              backup; if not specified, the disk size is equal to the backup
-              size.
- type: int
- required: false
- snapshot_id:
- description:
- - Specifies the snapshot ID. If this parameter is specified, the
- disk is created from a snapshot.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# test create disk
-- name: Create a disk
- community.general.hwc_evs_disk:
- availability_zone: "cn-north-1a"
- name: "ansible_evs_disk_test"
- volume_type: "SATA"
- size: 10
-'''
-
-RETURN = '''
- availability_zone:
- description:
- - Specifies the AZ where you want to create the disk.
- type: str
- returned: success
- name:
- description:
- - Specifies the disk name. The value can contain a maximum of 255
- bytes.
- type: str
- returned: success
- volume_type:
- description:
- - Specifies the disk type. Currently, the value can be SSD, SAS, or
- SATA.
- - SSD specifies the ultra-high I/O disk type.
- - SAS specifies the high I/O disk type.
- - SATA specifies the common I/O disk type.
- - If the specified disk type is not available in the AZ, the
- disk will fail to create. If the EVS disk is created from a
- snapshot, the volume_type field must be the same as that of the
- snapshot's source disk.
- type: str
- returned: success
- backup_id:
- description:
- - Specifies the ID of the backup that can be used to create a disk.
- This parameter is mandatory when you use a backup to create the
- disk.
- type: str
- returned: success
- description:
- description:
- - Specifies the disk description. The value can contain a maximum
- of 255 bytes.
- type: str
- returned: success
- enable_full_clone:
- description:
- - If the disk is created from a snapshot and linked cloning needs
- to be used, set this parameter to True.
- type: bool
- returned: success
- enable_scsi:
- description:
- - If this parameter is set to True, the disk device type will be
- SCSI, which allows ECS OSs to directly access underlying storage
- media. SCSI reservation command is supported. If this parameter
- is set to False, the disk device type will be VBD, which supports
- only simple SCSI read/write commands.
- - If parameter enable_share is set to True and this parameter
- is not specified, shared SCSI disks are created. SCSI EVS disks
- cannot be created from backups, which means that this parameter
- cannot be True if backup_id has been specified.
- type: bool
- returned: success
- enable_share:
- description:
- - Specifies whether the disk is shareable. The default value is
- False.
- type: bool
- returned: success
- encryption_id:
- description:
-            - Specifies the encryption ID. Its length is fixed at 36 bytes.
- type: str
- returned: success
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID. This ID is associated with
- the disk during the disk creation. If it is not specified, the
- disk is bound to the default enterprise project.
- type: str
- returned: success
- image_id:
- description:
- - Specifies the image ID. If this parameter is specified, the disk
- is created from an image. BMS system disks cannot be
- created from BMS images.
- type: str
- returned: success
- size:
- description:
-            - Specifies the disk size, in GB. The value ranges from 1 GB to
-              1024 GB for a system disk and from 10 GB to 32768 GB for a data
-              disk. This parameter is mandatory when you create an empty disk
-              or create a disk from an image or a snapshot; in that case the
-              disk size must be greater than or equal to the image or
-              snapshot size. It is optional when you create a disk from a
-              backup; if not specified, the disk size is equal to the backup
-              size.
- type: int
- returned: success
- snapshot_id:
- description:
- - Specifies the snapshot ID. If this parameter is specified, the
- disk is created from a snapshot.
- type: str
- returned: success
- attachments:
- description:
- - Specifies the disk attachment information.
- type: complex
- returned: success
- contains:
- attached_at:
- description:
- - Specifies the time when the disk was attached. Time
- format is 'UTC YYYY-MM-DDTHH:MM:SS'.
- type: str
- returned: success
- attachment_id:
- description:
- - Specifies the ID of the attachment information.
- type: str
- returned: success
- device:
- description:
- - Specifies the device name.
- type: str
- returned: success
- server_id:
- description:
- - Specifies the ID of the server to which the disk is
- attached.
- type: str
- returned: success
- backup_policy_id:
- description:
- - Specifies the backup policy ID.
- type: str
- returned: success
- created_at:
- description:
- - Specifies the time when the disk was created. Time format is 'UTC
- YYYY-MM-DDTHH:MM:SS'.
- type: str
- returned: success
- is_bootable:
- description:
- - Specifies whether the disk is bootable.
- type: bool
- returned: success
- is_readonly:
- description:
- - Specifies whether the disk is read-only or read/write. True
- indicates that the disk is read-only. False indicates that the
- disk is read/write.
- type: bool
- returned: success
- source_volume_id:
- description:
- - Specifies the source disk ID. This parameter has a value if the
- disk is created from a source disk.
- type: str
- returned: success
- status:
- description:
- - Specifies the disk status.
- type: str
- returned: success
- tags:
- description:
- - Specifies the disk tags.
- type: dict
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='30m', type='str'),
- update=dict(default='30m', type='str'),
- delete=dict(default='30m', type='str'),
- ), default=dict()),
- availability_zone=dict(type='str', required=True),
- name=dict(type='str', required=True),
- volume_type=dict(type='str', required=True),
- backup_id=dict(type='str'),
- description=dict(type='str'),
- enable_full_clone=dict(type='bool'),
- enable_scsi=dict(type='bool'),
- enable_share=dict(type='bool'),
- encryption_id=dict(type='str'),
- enterprise_project_id=dict(type='str'),
- image_id=dict(type='str'),
- size=dict(type='int'),
- snapshot_id=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "evs")
-
- try:
- _init(config)
- is_exist = module.params.get('id')
-
- result = None
- changed = False
- if module.params['state'] == 'present':
- if not is_exist:
- if not module.check_mode:
- create(config)
- changed = True
-
- inputv = user_input_parameters(module)
- resp, array_index = read_resource(config)
- result = build_state(inputv, resp, array_index)
- set_readonly_options(inputv, result)
- if are_different_dicts(inputv, result):
- if not module.check_mode:
- update(config, inputv, result)
-
- inputv = user_input_parameters(module)
- resp, array_index = read_resource(config)
- result = build_state(inputv, resp, array_index)
- set_readonly_options(inputv, result)
- if are_different_dicts(inputv, result):
- raise Exception("Update resource failed, "
- "some attributes are not updated")
-
- changed = True
-
- result['id'] = module.params.get('id')
- else:
- result = dict()
- if is_exist:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def _init(config):
- module = config.module
- if module.params.get('id'):
- return
-
- v = search_resource(config)
- n = len(v)
- if n > 1:
- raise Exception("find more than one resources(%s)" % ", ".join([
- navigate_value(i, ["id"])
- for i in v
- ]))
-
- if n == 1:
- module.params['id'] = navigate_value(v[0], ["id"])
-
-
-def user_input_parameters(module):
- return {
- "availability_zone": module.params.get("availability_zone"),
- "backup_id": module.params.get("backup_id"),
- "description": module.params.get("description"),
- "enable_full_clone": module.params.get("enable_full_clone"),
- "enable_scsi": module.params.get("enable_scsi"),
- "enable_share": module.params.get("enable_share"),
- "encryption_id": module.params.get("encryption_id"),
- "enterprise_project_id": module.params.get("enterprise_project_id"),
- "image_id": module.params.get("image_id"),
- "name": module.params.get("name"),
- "size": module.params.get("size"),
- "snapshot_id": module.params.get("snapshot_id"),
- "volume_type": module.params.get("volume_type"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "volumev3", "project")
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- opts = user_input_parameters(module)
- opts["ansible_module"] = module
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
-
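-    # The job-status endpoint polled by async_wait lives under /v1/, so
-    # point a volume client at the v1 endpoint before waiting.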
- client1 = config.client(get_region(module), "volume", "project")
- client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
- obj = async_wait(config, r, client1, timeout)
- module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
-
-
-def update(config, expect_state, current_state):
- module = config.module
- expect_state["current_state"] = current_state
- current_state["current_state"] = current_state
- client = config.client(get_region(module), "evs", "project")
- timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
-
- params = build_update_parameters(expect_state)
- params1 = build_update_parameters(current_state)
- if params and are_different_dicts(params, params1):
- send_update_request(module, params, client)
-
- params = build_extend_disk_parameters(expect_state)
- params1 = build_extend_disk_parameters(current_state)
- if params and are_different_dicts(params, params1):
- client1 = config.client(get_region(module), "evsv2.1", "project")
- r = send_extend_disk_request(module, params, client1)
-
- client1 = config.client(get_region(module), "volume", "project")
- client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
- async_wait(config, r, client1, timeout)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "evs", "project")
- timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
-
- r = send_delete_request(module, None, client)
-
- client = config.client(get_region(module), "volume", "project")
- client.endpoint = client.endpoint.replace("/v2/", "/v1/")
- async_wait(config, r, client, timeout)
-
-
-def read_resource(config):
- module = config.module
- client = config.client(get_region(module), "volumev3", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return res, None
-
-
-def build_state(opts, response, array_index):
- states = flatten_options(response, array_index)
- set_unreadable_options(opts, states)
- return states
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["enable_share"])
- if v or v in [False, 0]:
- query_params.append(
- "multiattach=" + (str(v) if v else str(v).lower()))
-
- v = navigate_value(opts, ["name"])
- if v or v in [False, 0]:
- query_params.append(
- "name=" + (str(v) if v else str(v).lower()))
-
- v = navigate_value(opts, ["availability_zone"])
- if v or v in [False, 0]:
- query_params.append(
- "availability_zone=" + (str(v) if v else str(v).lower()))
-
- query_link = "?limit=10&offset={start}"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "volumev3", "project")
- opts = user_input_parameters(module)
- name = module.params.get("name")
- query_link = _build_query_link(opts)
- link = "os-vendor-volumes/detail" + query_link
-
- result = []
- p = {'start': 0}
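-    # Page through the volume list (10 items per page; start advances by the
-    # number of items returned) until a page comes back empty or more than
-    # one name match is found.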
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- if name == item.get("name"):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['start'] += len(r)
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["availability_zone"], None)
- if not is_empty_value(v):
- params["availability_zone"] = v
-
- v = navigate_value(opts, ["backup_id"], None)
- if not is_empty_value(v):
- params["backup_id"] = v
-
- v = navigate_value(opts, ["description"], None)
- if not is_empty_value(v):
- params["description"] = v
-
- v = navigate_value(opts, ["enterprise_project_id"], None)
- if not is_empty_value(v):
- params["enterprise_project_id"] = v
-
- v = navigate_value(opts, ["image_id"], None)
- if not is_empty_value(v):
- params["imageRef"] = v
-
- v = expand_create_metadata(opts, None)
- if not is_empty_value(v):
- params["metadata"] = v
-
- v = navigate_value(opts, ["enable_share"], None)
- if not is_empty_value(v):
- params["multiattach"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = navigate_value(opts, ["size"], None)
- if not is_empty_value(v):
- params["size"] = v
-
- v = navigate_value(opts, ["snapshot_id"], None)
- if not is_empty_value(v):
- params["snapshot_id"] = v
-
- v = navigate_value(opts, ["volume_type"], None)
- if not is_empty_value(v):
- params["volume_type"] = v
-
- if not params:
- return params
-
- params = {"volume": params}
-
- return params
-
-
-def expand_create_metadata(d, array_index):
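-    # EVS encodes several boolean options as string flags inside the volume
-    # metadata ("__system__encrypted", "full_clone", "hw:passthrough");
-    # build that mapping here.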
- r = dict()
-
- v = navigate_value(d, ["encryption_id"], array_index)
- if not is_empty_value(v):
- r["__system__cmkid"] = v
-
- v = expand_create_metadata_system_encrypted(d, array_index)
- if not is_empty_value(v):
- r["__system__encrypted"] = v
-
- v = expand_create_metadata_full_clone(d, array_index)
- if not is_empty_value(v):
- r["full_clone"] = v
-
- v = expand_create_metadata_hw_passthrough(d, array_index)
- if not is_empty_value(v):
- r["hw:passthrough"] = v
-
- return r
-
-
-def expand_create_metadata_system_encrypted(d, array_index):
- v = navigate_value(d, ["encryption_id"], array_index)
- return "1" if v else ""
-
-
-def expand_create_metadata_full_clone(d, array_index):
- v = navigate_value(d, ["enable_full_clone"], array_index)
- return "0" if v else ""
-
-
-def expand_create_metadata_hw_passthrough(d, array_index):
- v = navigate_value(d, ["enable_scsi"], array_index)
- if v is None:
- return v
- return "true" if v else "false"
-
-
-def send_create_request(module, params, client):
- url = "cloudvolumes"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_update_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["description"], None)
- if v is not None:
- params["description"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- if not params:
- return params
-
- params = {"volume": params}
-
- return params
-
-
-def send_update_request(module, params, client):
- url = build_path(module, "cloudvolumes/{id}")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(update), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "cloudvolumes/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def build_extend_disk_parameters(opts):
- params = dict()
-
- v = expand_extend_disk_os_extend(opts, None)
- if not is_empty_value(v):
- params["os-extend"] = v
-
- return params
-
-
-def expand_extend_disk_os_extend(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["size"], array_index)
- if not is_empty_value(v):
- r["new_size"] = v
-
- return r
-
-
-def send_extend_disk_request(module, params, client):
- url = build_path(module, "cloudvolumes/{id}/action")
-
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(extend_disk), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "job_id": ["job_id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "jobs/{job_id}", data)
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["SUCCESS"],
- ["RUNNING", "INIT"],
- _query_status, timeout)
- except Exception as ex:
-        module.fail_json(msg="module(hwc_evs_disk): error "
-                             "waiting for the job to finish, error: %s" % str(ex))
-
-
-def send_read_request(module, client):
- url = build_path(module, "os-vendor-volumes/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["volume"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- v = fill_read_resp_attachments(body.get("attachments"))
- result["attachments"] = v
-
- result["availability_zone"] = body.get("availability_zone")
-
- result["bootable"] = body.get("bootable")
-
- result["created_at"] = body.get("created_at")
-
- result["description"] = body.get("description")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- v = fill_read_resp_metadata(body.get("metadata"))
- result["metadata"] = v
-
- result["multiattach"] = body.get("multiattach")
-
- result["name"] = body.get("name")
-
- result["size"] = body.get("size")
-
- result["snapshot_id"] = body.get("snapshot_id")
-
- result["source_volid"] = body.get("source_volid")
-
- result["status"] = body.get("status")
-
- result["tags"] = body.get("tags")
-
- v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
- result["volume_image_metadata"] = v
-
- result["volume_type"] = body.get("volume_type")
-
- return result
-
-
-def fill_read_resp_attachments(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["attached_at"] = item.get("attached_at")
-
- val["attachment_id"] = item.get("attachment_id")
-
- val["device"] = item.get("device")
-
- val["server_id"] = item.get("server_id")
-
- result.append(val)
-
- return result
-
-
-def fill_read_resp_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["__system__cmkid"] = value.get("__system__cmkid")
-
- result["attached_mode"] = value.get("attached_mode")
-
- result["full_clone"] = value.get("full_clone")
-
- result["hw:passthrough"] = value.get("hw:passthrough")
-
- result["policy"] = value.get("policy")
-
- result["readonly"] = value.get("readonly")
-
- return result
-
-
-def fill_read_resp_volume_image_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-def flatten_options(response, array_index):
- r = dict()
-
- v = flatten_attachments(response, array_index)
- r["attachments"] = v
-
- v = navigate_value(response, ["read", "availability_zone"], array_index)
- r["availability_zone"] = v
-
- v = navigate_value(response, ["read", "metadata", "policy"], array_index)
- r["backup_policy_id"] = v
-
- v = navigate_value(response, ["read", "created_at"], array_index)
- r["created_at"] = v
-
- v = navigate_value(response, ["read", "description"], array_index)
- r["description"] = v
-
- v = flatten_enable_full_clone(response, array_index)
- r["enable_full_clone"] = v
-
- v = flatten_enable_scsi(response, array_index)
- r["enable_scsi"] = v
-
- v = navigate_value(response, ["read", "multiattach"], array_index)
- r["enable_share"] = v
-
- v = navigate_value(
- response, ["read", "metadata", "__system__cmkid"], array_index)
- r["encryption_id"] = v
-
- v = navigate_value(
- response, ["read", "enterprise_project_id"], array_index)
- r["enterprise_project_id"] = v
-
- v = navigate_value(
- response, ["read", "volume_image_metadata", "id"], array_index)
- r["image_id"] = v
-
- v = flatten_is_bootable(response, array_index)
- r["is_bootable"] = v
-
- v = flatten_is_readonly(response, array_index)
- r["is_readonly"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- v = navigate_value(response, ["read", "size"], array_index)
- r["size"] = v
-
- v = navigate_value(response, ["read", "snapshot_id"], array_index)
- r["snapshot_id"] = v
-
- v = navigate_value(response, ["read", "source_volid"], array_index)
- r["source_volume_id"] = v
-
- v = navigate_value(response, ["read", "status"], array_index)
- r["status"] = v
-
- v = navigate_value(response, ["read", "tags"], array_index)
- r["tags"] = v
-
- v = navigate_value(response, ["read", "volume_type"], array_index)
- r["volume_type"] = v
-
- return r
-
-
-def flatten_attachments(d, array_index):
- v = navigate_value(d, ["read", "attachments"],
- array_index)
- if not v:
- return None
- n = len(v)
- result = []
-
- new_ai = dict()
- if array_index:
- new_ai.update(array_index)
-
- for i in range(n):
- new_ai["read.attachments"] = i
-
- val = dict()
-
- v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
- val["attached_at"] = v
-
- v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
- val["attachment_id"] = v
-
- v = navigate_value(d, ["read", "attachments", "device"], new_ai)
- val["device"] = v
-
- v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
- val["server_id"] = v
-
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if result else None
-
-
-def flatten_enable_full_clone(d, array_index):
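-    # In the volume metadata, full_clone == "0" means linked clone is
-    # enabled (mirroring expand_create_metadata_full_clone, which writes "0"
-    # when enable_full_clone is true).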
- v = navigate_value(d, ["read", "metadata", "full_clone"],
- array_index)
- if v is None:
- return v
- return True if v == "0" else False
-
-
-def flatten_enable_scsi(d, array_index):
- v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
- array_index)
- if v is None:
- return v
-    return v in ["true", "True"]
-
-
-def flatten_is_bootable(d, array_index):
- v = navigate_value(d, ["read", "bootable"], array_index)
- if v is None:
- return v
- return v in ["true", "True"]
-
-
-def flatten_is_readonly(d, array_index):
- v = navigate_value(d, ["read", "metadata", "readonly"],
- array_index)
- if v is None:
- return v
- return v in ["true", "True"]
-
-
-def set_unreadable_options(opts, states):
- states["backup_id"] = opts.get("backup_id")
-
-
-def set_readonly_options(opts, states):
- opts["attachments"] = states.get("attachments")
-
- opts["backup_policy_id"] = states.get("backup_policy_id")
-
- opts["created_at"] = states.get("created_at")
-
- opts["is_bootable"] = states.get("is_bootable")
-
- opts["is_readonly"] = states.get("is_readonly")
-
- opts["source_volume_id"] = states.get("source_volume_id")
-
- opts["status"] = states.get("status")
-
- opts["tags"] = states.get("tags")
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_evs_disk): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["volumes"], None)
-
-
-def expand_list_metadata(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["encryption_id"], array_index)
- r["__system__cmkid"] = v
-
- r["attached_mode"] = None
-
- v = navigate_value(d, ["enable_full_clone"], array_index)
- r["full_clone"] = v
-
- v = navigate_value(d, ["enable_scsi"], array_index)
- r["hw:passthrough"] = v
-
- r["policy"] = None
-
- r["readonly"] = None
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def expand_list_volume_image_metadata(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["image_id"], array_index)
- r["id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- v = fill_list_resp_attachments(body.get("attachments"))
- result["attachments"] = v
-
- result["availability_zone"] = body.get("availability_zone")
-
- result["bootable"] = body.get("bootable")
-
- result["created_at"] = body.get("created_at")
-
- result["description"] = body.get("description")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- v = fill_list_resp_metadata(body.get("metadata"))
- result["metadata"] = v
-
- result["multiattach"] = body.get("multiattach")
-
- result["name"] = body.get("name")
-
- result["size"] = body.get("size")
-
- result["snapshot_id"] = body.get("snapshot_id")
-
- result["source_volid"] = body.get("source_volid")
-
- result["status"] = body.get("status")
-
- result["tags"] = body.get("tags")
-
- v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
- result["volume_image_metadata"] = v
-
- result["volume_type"] = body.get("volume_type")
-
- return result
-
-
-def fill_list_resp_attachments(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["attached_at"] = item.get("attached_at")
-
- val["attachment_id"] = item.get("attachment_id")
-
- val["device"] = item.get("device")
-
- val["server_id"] = item.get("server_id")
-
- result.append(val)
-
- return result
-
-
-def fill_list_resp_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["__system__cmkid"] = value.get("__system__cmkid")
-
- result["attached_mode"] = value.get("attached_mode")
-
- result["full_clone"] = value.get("full_clone")
-
- result["hw:passthrough"] = value.get("hw:passthrough")
-
- result["policy"] = value.get("policy")
-
- result["readonly"] = value.get("readonly")
-
- return result
-
-
-def fill_list_resp_volume_image_metadata(value):
- if not value:
- return None
-
- result = dict()
-
- result["id"] = value.get("id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
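
Note: the four flatten_* boolean helpers in the module above (full_clone, scsi, bootable, readonly) all normalise string metadata returned by the EVS API into Python booleans. A minimal standalone sketch of that conversion, assuming only that the API encodes booleans as the strings "true"/"True" (the name str_to_bool is mine, not part of the module):

def str_to_bool(value, true_values=("true", "True")):
    """Map API string metadata to a bool; None stays None so callers can
    distinguish "absent" from "false"."""
    if value is None:
        return None
    return value in true_values

assert str_to_bool(None) is None
assert str_to_bool("True") is True
assert str_to_bool("false") is False
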
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
deleted file mode 100644
index f53369ad..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_network_vpc.py
+++ /dev/null
@@ -1,493 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2018 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_network_vpc
-description:
- - Represents a VPC resource.
-short_description: Creates a Huawei Cloud VPC
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - requests >= 2.18.4
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeout for create operation.
- type: str
- default: '15m'
- update:
- description:
- - The timeout for update operation.
- type: str
- default: '15m'
- delete:
- description:
- - The timeout for delete operation.
- type: str
- default: '15m'
- name:
- description:
- - The name of vpc.
- type: str
- required: true
- cidr:
- description:
- - The range of available subnets in the vpc.
- type: str
- required: true
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-- name: Create a vpc
- community.general.hwc_network_vpc:
- identity_endpoint: "{{ identity_endpoint }}"
- user: "{{ user }}"
- password: "{{ password }}"
- domain: "{{ domain }}"
- project: "{{ project }}"
- region: "{{ region }}"
- name: "vpc_1"
- cidr: "192.168.100.0/24"
- state: present
-'''
-
-RETURN = '''
- id:
- description:
- - the id of vpc.
- type: str
- returned: success
- name:
- description:
- - the name of vpc.
- type: str
- returned: success
- cidr:
- description:
- - the range of available subnets in the vpc.
- type: str
- returned: success
- status:
- description:
- - the status of vpc.
- type: str
- returned: success
- routes:
- description:
- - the route information.
- type: complex
- returned: success
- contains:
- destination:
- description:
- - the destination network segment of a route.
- type: str
- returned: success
- next_hop:
- description:
- - the next hop of a route. If the route type is peering,
- it will provide the VPC peering connection ID.
- type: str
- returned: success
- enable_shared_snat:
- description:
- - show whether the shared snat is enabled.
- type: bool
- returned: success
-'''
-
-###############################################################################
-# Imports
-###############################################################################
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
- HwcClientException404, HwcModule,
- are_different_dicts, is_empty_value,
- wait_to_finish, get_region,
- build_path, navigate_value)
-import re
-
-###############################################################################
-# Main
-###############################################################################
-
-
-def main():
- """Main function"""
-
- module = HwcModule(
- argument_spec=dict(
- state=dict(
- default='present', choices=['present', 'absent'], type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='15m', type='str'),
- update=dict(default='15m', type='str'),
- delete=dict(default='15m', type='str'),
- ), default=dict()),
- name=dict(required=True, type='str'),
- cidr=dict(required=True, type='str')
- ),
- supports_check_mode=True,
- )
- config = Config(module, 'vpc')
-
- state = module.params['state']
-
- if (not module.params.get("id")) and module.params.get("name"):
- module.params['id'] = get_id_by_name(config)
-
- fetch = None
- link = self_link(module)
- # the link will include Nones if required format parameters are missing
- if not re.search('/None/|/None$', link):
- client = config.client(get_region(module), "vpc", "project")
- fetch = fetch_resource(module, client, link)
- if fetch:
- fetch = fetch.get('vpc')
- changed = False
-
- if fetch:
- if state == 'present':
- expect = _get_editable_properties(module)
- current_state = response_to_hash(module, fetch)
- current = {"cidr": current_state["cidr"]}
- if are_different_dicts(expect, current):
- if not module.check_mode:
- fetch = update(config, self_link(module))
- fetch = response_to_hash(module, fetch.get('vpc'))
- changed = True
- else:
- fetch = current_state
- else:
- if not module.check_mode:
- delete(config, self_link(module))
- fetch = {}
- changed = True
- else:
- if state == 'present':
- if not module.check_mode:
- fetch = create(config, "vpcs")
- fetch = response_to_hash(module, fetch.get('vpc'))
- changed = True
- else:
- fetch = {}
-
- fetch.update({'changed': changed})
-
- module.exit_json(**fetch)
-
-
-def create(config, link):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- r = None
- try:
- r = client.post(link, resource_to_create(module))
- except HwcClientException as ex:
- msg = ("module(hwc_network_vpc): error creating "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- wait_done = wait_for_operation(config, 'create', r)
- v = ""
- try:
- v = navigate_value(wait_done, ['vpc', 'id'])
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
- return fetch_resource(module, client, url)
-
-
-def update(config, link):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- r = None
- try:
- r = client.put(link, resource_to_update(module))
- except HwcClientException as ex:
- msg = ("module(hwc_network_vpc): error updating "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- wait_for_operation(config, 'update', r)
-
- return fetch_resource(module, client, link)
-
-
-def delete(config, link):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- try:
- client.delete(link)
- except HwcClientException as ex:
- msg = ("module(hwc_network_vpc): error deleting "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- wait_for_delete(module, client, link)
-
-
-def fetch_resource(module, client, link):
- try:
- return client.get(link)
- except HwcClientException as ex:
- msg = ("module(hwc_network_vpc): error fetching "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
-
-def get_id_by_name(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- name = module.params.get("name")
- link = "vpcs"
- query_link = "?marker={marker}&limit=10"
- link += query_link
- not_format_keys = re.findall("={marker}", link)
- none_values = re.findall("=None", link)
-
- if not (not_format_keys or none_values):
- r = None
- try:
- r = client.get(link)
- except Exception:
- pass
- if r is None:
- return None
- r = r.get('vpcs', [])
- ids = [
- i.get('id') for i in r if i.get('name', '') == name
- ]
- if not ids:
- return None
- elif len(ids) == 1:
- return ids[0]
- else:
- module.fail_json(
- msg="Multiple resources with the same name were found.")
- elif none_values:
- module.fail_json(
- msg="Cannot find id by name because url includes None.")
- else:
- p = {'marker': ''}
- ids = set()
- while True:
- r = None
- try:
- r = client.get(link.format(**p))
- except Exception:
- pass
- if r is None:
- break
- r = r.get('vpcs', [])
- if r == []:
- break
- for i in r:
- if i.get('name') == name:
- ids.add(i.get('id'))
- if len(ids) >= 2:
- module.fail_json(
- msg="Multiple resources with the same name were found.")
-
- p['marker'] = r[-1].get('id')
-
- return ids.pop() if ids else None
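
Note: get_id_by_name above pages through the list API with a marker cursor and fails when two VPCs share a name. The paging loop in isolation, as a hedged sketch with the HTTP client stubbed out (fetch_page and list_all are hypothetical names, not part of hwc_utils):

def list_all(fetch_page):
    """fetch_page(marker) returns a list of dicts with an 'id' key, or an
    empty list once the listing is exhausted."""
    items = []
    marker = ''
    while True:
        page = fetch_page(marker)
        if not page:
            break
        items.extend(page)
        marker = page[-1]['id']  # the next page starts after the last id seen
    return items

pages = {'': [{'id': 'a'}, {'id': 'b'}], 'b': [{'id': 'c'}], 'c': []}
assert [i['id'] for i in list_all(lambda m: pages[m])] == ['a', 'b', 'c']
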
-
-
-def self_link(module):
- return build_path(module, "vpcs/{id}")
-
-
-def resource_to_create(module):
- params = dict()
-
- v = module.params.get('cidr')
- if not is_empty_value(v):
- params["cidr"] = v
-
- v = module.params.get('name')
- if not is_empty_value(v):
- params["name"] = v
-
- if not params:
- return params
-
- params = {"vpc": params}
-
- return params
-
-
-def resource_to_update(module):
- params = dict()
-
- v = module.params.get('cidr')
- if not is_empty_value(v):
- params["cidr"] = v
-
- if not params:
- return params
-
- params = {"vpc": params}
-
- return params
-
-
-def _get_editable_properties(module):
- return {
- "cidr": module.params.get("cidr"),
- }
-
-
-def response_to_hash(module, response):
- """ Remove unnecessary properties from the response.
- This is for doing comparisons with Ansible's current parameters.
- """
- return {
- u'id': response.get(u'id'),
- u'name': response.get(u'name'),
- u'cidr': response.get(u'cidr'),
- u'status': response.get(u'status'),
- u'routes': VpcRoutesArray(
- response.get(u'routes', []), module).from_response(),
- u'enable_shared_snat': response.get(u'enable_shared_snat')
- }
-
-
-def wait_for_operation(config, op_type, op_result):
- module = config.module
- op_id = ""
- try:
- op_id = navigate_value(op_result, ['vpc', 'id'])
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
- timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
- states = {
- 'create': {
- 'allowed': ['CREATING', 'DOWN', 'OK'],
- 'complete': ['OK'],
- },
- 'update': {
- 'allowed': ['PENDING_UPDATE', 'DOWN', 'OK'],
- 'complete': ['OK'],
- }
- }
-
- return wait_for_completion(url, timeout, states[op_type]['allowed'],
- states[op_type]['complete'], config)
-
-
-def wait_for_completion(op_uri, timeout, allowed_states,
- complete_states, config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- def _refresh_status():
- r = None
- try:
- r = fetch_resource(module, client, op_uri)
- except Exception:
- return None, ""
-
- status = ""
- try:
- status = navigate_value(r, ['vpc', 'status'])
- except Exception:
- return None, ""
-
- return r, status
-
- try:
- return wait_to_finish(complete_states, allowed_states,
- _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def wait_for_delete(module, client, link):
-
- def _refresh_status():
- try:
- client.get(link)
- except HwcClientException404:
- return True, "Done"
-
- except Exception:
- return None, ""
-
- return True, "Pending"
-
- timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
- try:
- return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg=str(ex))
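
Note: both wait_* helpers above delegate to hwc_utils.wait_to_finish with a refresh callback that returns (resource, status). A simplified re-implementation of that contract, under the assumption that it polls until the status reaches a complete state, rejects a non-empty status outside the allowed set, and raises on timeout:

import time

def wait_to_finish_sketch(complete, allowed, refresh, timeout, interval=2):
    """Poll refresh() until a complete status is seen or timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource, status = refresh()
        if status in complete:
            return resource
        if allowed is not None and status and status not in allowed:
            raise Exception("unexpected status: %s" % status)
        time.sleep(interval)
    raise Exception("timed out waiting for the operation to finish")
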
-
-
-class VpcRoutesArray(object):
- def __init__(self, request, module):
- self.module = module
- if request:
- self.request = request
- else:
- self.request = []
-
- def to_request(self):
- items = []
- for item in self.request:
- items.append(self._request_for_item(item))
- return items
-
- def from_response(self):
- items = []
- for item in self.request:
- items.append(self._response_from_item(item))
- return items
-
- def _request_for_item(self, item):
- return {
- u'destination': item.get('destination'),
- u'nexthop': item.get('next_hop')
- }
-
- def _response_from_item(self, item):
- return {
- u'destination': item.get(u'destination'),
- u'next_hop': item.get(u'nexthop')
- }
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
deleted file mode 100644
index f7fb4fae..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_smn_topic.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_smn_topic
-description:
- - Represents a SMN notification topic resource.
-short_description: Creates a resource of SMNTopic in Huawei Cloud
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - requests >= 2.18.4
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- display_name:
- description:
- - Topic display name, which is presented as the name of the email
- sender in an email message. The topic display name contains a
- maximum of 192 bytes.
- type: str
- required: false
- name:
- description:
- - Name of the topic to be created. The topic name is a string of 1
- to 256 characters. It must contain upper- or lower-case letters,
- digits, hyphens (-), and underscores C(_), and must start with a
- letter or digit.
- type: str
- required: true
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-- name: Create a smn topic
- community.general.hwc_smn_topic:
- identity_endpoint: "{{ identity_endpoint }}"
- user_name: "{{ user_name }}"
- password: "{{ password }}"
- domain_name: "{{ domain_name }}"
- project_name: "{{ project_name }}"
- region: "{{ region }}"
- name: "ansible_smn_topic_test"
- state: present
-'''
-
-RETURN = '''
-create_time:
- description:
- - Time when the topic was created.
- returned: success
- type: str
-display_name:
- description:
- - Topic display name, which is presented as the name of the email
- sender in an email message. The topic display name contains a
- maximum of 192 bytes.
- returned: success
- type: str
-name:
- description:
- - Name of the topic to be created. The topic name is a string of 1
- to 256 characters. It must contain upper- or lower-case letters,
- digits, hyphens (-), and underscores C(_), and must start with a
- letter or digit.
- returned: success
- type: str
-push_policy:
- description:
- - Message pushing policy. 0 indicates that the message sending
- fails and the message is cached in the queue. 1 indicates that
- the failed message is discarded.
- returned: success
- type: int
-topic_urn:
- description:
- - Resource identifier of a topic, which is unique.
- returned: success
- type: str
-update_time:
- description:
- - Time when the topic was updated.
- returned: success
- type: str
-'''
-
-###############################################################################
-# Imports
-###############################################################################
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
- HwcModule, navigate_value,
- are_different_dicts, is_empty_value,
- build_path, get_region)
-import re
-
-###############################################################################
-# Main
-###############################################################################
-
-
-def main():
- """Main function"""
-
- module = HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- display_name=dict(type='str'),
- name=dict(required=True, type='str')
- ),
- supports_check_mode=True,
- )
-
- config = Config(module, "smn")
-
- state = module.params['state']
-
- if not module.params.get("id"):
- module.params['id'] = get_resource_id(config)
-
- fetch = None
- link = self_link(module)
- # the link will include Nones if required format parameters are missing
- if not re.search('/None/|/None$', link):
- client = config.client(get_region(module), "smn", "project")
- fetch = fetch_resource(module, client, link)
- changed = False
-
- if fetch:
- if state == 'present':
- expect = _get_resource_editable_properties(module)
- current_state = response_to_hash(module, fetch)
- current = {'display_name': current_state['display_name']}
- if are_different_dicts(expect, current):
- if not module.check_mode:
- fetch = update(config)
- fetch = response_to_hash(module, fetch)
- changed = True
- else:
- fetch = current_state
- else:
- if not module.check_mode:
- delete(config)
- fetch = {}
- changed = True
- else:
- if state == 'present':
- if not module.check_mode:
- fetch = create(config)
- fetch = response_to_hash(module, fetch)
- changed = True
- else:
- fetch = {}
-
- fetch.update({'changed': changed})
-
- module.exit_json(**fetch)
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "smn", "project")
-
- link = "notifications/topics"
- r = None
- try:
- r = client.post(link, create_resource_opts(module))
- except HwcClientException as ex:
- msg = ("module(hwc_smn_topic): error creating "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return get_resource(config, r)
-
-
-def update(config):
- module = config.module
- client = config.client(get_region(module), "smn", "project")
-
- link = self_link(module)
- try:
- client.put(link, update_resource_opts(module))
- except HwcClientException as ex:
- msg = ("module(hwc_smn_topic): error updating "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return fetch_resource(module, client, link)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "smn", "project")
-
- link = self_link(module)
- try:
- client.delete(link)
- except HwcClientException as ex:
- msg = ("module(hwc_smn_topic): error deleting "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
-
-def fetch_resource(module, client, link):
- try:
- return client.get(link)
- except HwcClientException as ex:
- msg = ("module(hwc_smn_topic): error fetching "
- "resource, error: %s" % str(ex))
- module.fail_json(msg=msg)
-
-
-def get_resource(config, result):
- module = config.module
- client = config.client(get_region(module), "smn", "project")
-
- v = ""
- try:
- v = navigate_value(result, ['topic_urn'])
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- d = {'topic_urn': v}
- url = build_path(module, 'notifications/topics/{topic_urn}', d)
-
- return fetch_resource(module, client, url)
-
-
-def get_resource_id(config):
- module = config.module
- client = config.client(get_region(module), "smn", "project")
-
- link = "notifications/topics"
- query_link = "?offset={offset}&limit=10"
- link += query_link
-
- p = {'offset': 0}
- v = module.params.get('name')
- ids = set()
- while True:
- r = None
- try:
- r = client.get(link.format(**p))
- except Exception:
- pass
- if r is None:
- break
- r = r.get('topics', [])
- if r == []:
- break
- for i in r:
- if i.get('name') == v:
- ids.add(i.get('topic_urn'))
- if len(ids) >= 2:
- module.fail_json(msg="Multiple resources are found")
-
- p['offset'] += 1
-
- return ids.pop() if ids else None
-
-
-def self_link(module):
- return build_path(module, "notifications/topics/{id}")
-
-
-def create_resource_opts(module):
- params = dict()
-
- v = module.params.get('display_name')
- if not is_empty_value(v):
- params["display_name"] = v
-
- v = module.params.get('name')
- if not is_empty_value(v):
- params["name"] = v
-
- return params
-
-
-def update_resource_opts(module):
- params = dict()
-
- v = module.params.get('display_name')
- if not is_empty_value(v):
- params["display_name"] = v
-
- return params
-
-
-def _get_resource_editable_properties(module):
- return {
- "display_name": module.params.get("display_name"),
- }
-
-
-def response_to_hash(module, response):
- """Remove unnecessary properties from the response.
- This is for doing comparisons with Ansible's current parameters.
- """
- return {
- u'create_time': response.get(u'create_time'),
- u'display_name': response.get(u'display_name'),
- u'name': response.get(u'name'),
- u'push_policy': _push_policy_convert_from_response(
- response.get('push_policy')),
- u'topic_urn': response.get(u'topic_urn'),
- u'update_time': response.get(u'update_time')
- }
-
-
-def _push_policy_convert_from_response(value):
- return {
- 0: "the message sending fails and is cached in the queue",
- 1: "the failed message is discarded",
- }.get(int(value))
-
-
-if __name__ == '__main__':
- main()
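
Note: main() above, like the other deleted hwc_* modules, is one instance of a fetch/compare/apply reconcile loop. The shape, distilled into a sketch (the function and its parameter names are mine):

def reconcile(state, resource, differs, create, update, delete,
              check_mode=False):
    """Return (result, changed) for the desired `state`, given the
    currently fetched `resource` (None when it does not exist)."""
    if resource is None:
        if state != 'present':
            return {}, False
        return ({} if check_mode else create()), True
    if state == 'absent':
        if not check_mode:
            delete()
        return {}, True
    if differs(resource):
        return (resource if check_mode else update()), True
    return resource, False

assert reconcile('absent', None, None, None, None, None) == ({}, False)
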
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
deleted file mode 100644
index b53395f8..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_eip.py
+++ /dev/null
@@ -1,877 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_eip
-description:
- - Elastic IP management.
-short_description: Creates a resource of Vpc/EIP in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeout for the create operation.
- type: str
- default: '5m'
- update:
- description:
- - The timeout for the update operation.
- type: str
- default: '5m'
- type:
- description:
- - Specifies the EIP type.
- type: str
- required: true
- dedicated_bandwidth:
- description:
- - Specifies the dedicated bandwidth object.
- type: dict
- required: false
- suboptions:
- charge_mode:
- description:
- - Specifies whether the bandwidth is billed by traffic or
- by bandwidth size. The value can be bandwidth or traffic.
- If this parameter is left blank or is an empty
- string, the default value bandwidth is used. For IPv6
- addresses, the default parameter value is bandwidth
- outside China and is traffic in China.
- type: str
- required: true
- name:
- description:
- - Specifies the bandwidth name. The value is a string of 1
- to 64 characters that can contain letters, digits,
- underscores C(_), hyphens (-), and periods (.).
- type: str
- required: true
- size:
- description:
- - Specifies the bandwidth size. The value ranges from 1
- Mbit/s to 2000 Mbit/s by default. (The specific range may
- vary depending on the configuration in each region. You
- can see the bandwidth range of each region on the
- management console.) The minimum unit for bandwidth
- adjustment varies depending on the bandwidth range. The
- details are as follows.
- - The minimum unit is 1 Mbit/s if the allowed bandwidth
- size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
- included).
- - The minimum unit is 50 Mbit/s if the allowed bandwidth
- size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
- included).
- - The minimum unit is 500 Mbit/s if the allowed bandwidth
- size is greater than 1000 Mbit/s.
- type: int
- required: true
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID.
- type: str
- required: false
- ip_version:
- description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
- parameter is left blank, an IPv4 address will be assigned.
- type: int
- required: false
- ipv4_address:
- description:
- - Specifies the obtained IPv4 EIP. The system automatically assigns
- an EIP if you do not specify it.
- type: str
- required: false
- port_id:
- description:
- - Specifies the port ID. This parameter is returned only when a
- private IP address is bound with the EIP.
- type: str
- required: false
- shared_bandwidth_id:
- description:
- - Specifies the ID of shared bandwidth.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create an eip and bind it to a port
-- name: Create vpc
- hwc_network_vpc:
- cidr: "192.168.100.0/24"
- name: "ansible_network_vpc_test"
- register: vpc
-- name: Create subnet
- hwc_vpc_subnet:
- gateway_ip: "192.168.100.32"
- name: "ansible_network_subnet_test"
- dhcp_enable: True
- vpc_id: "{{ vpc.id }}"
- cidr: "192.168.100.0/26"
- register: subnet
-- name: Create a port
- hwc_vpc_port:
- subnet_id: "{{ subnet.id }}"
- ip_address: "192.168.100.33"
- register: port
-- name: Create an eip and bind it to a port
- community.general.hwc_vpc_eip:
- type: "5_bgp"
- dedicated_bandwidth:
- charge_mode: "traffic"
- name: "ansible_test_dedicated_bandwidth"
- size: 1
- port_id: "{{ port.id }}"
-'''
-
-RETURN = '''
- type:
- description:
- - Specifies the EIP type.
- type: str
- returned: success
- dedicated_bandwidth:
- description:
- - Specifies the dedicated bandwidth object.
- type: dict
- returned: success
- contains:
- charge_mode:
- description:
- - Specifies whether the bandwidth is billed by traffic or
- by bandwidth size. The value can be bandwidth or traffic.
- If this parameter is left blank or is an empty
- string, the default value bandwidth is used. For IPv6
- addresses, the default parameter value is bandwidth
- outside China and is traffic in China.
- type: str
- returned: success
- name:
- description:
- - Specifies the bandwidth name. The value is a string of 1
- to 64 characters that can contain letters, digits,
- underscores C(_), hyphens (-), and periods (.).
- type: str
- returned: success
- size:
- description:
- - Specifies the bandwidth size. The value ranges from 1
- Mbit/s to 2000 Mbit/s by default. (The specific range may
- vary depending on the configuration in each region. You
- can see the bandwidth range of each region on the
- management console.) The minimum unit for bandwidth
- adjustment varies depending on the bandwidth range. The
- details are as follows.
- - The minimum unit is 1 Mbit/s if the allowed bandwidth
- size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
- included).
- - The minimum unit is 50 Mbit/s if the allowed bandwidth
- size ranges from 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s
- included).
- - The minimum unit is 500 Mbit/s if the allowed bandwidth
- size is greater than 1000 Mbit/s.
- type: int
- returned: success
- id:
- description:
- - Specifies the ID of dedicated bandwidth.
- type: str
- returned: success
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID.
- type: str
- returned: success
- ip_version:
- description:
- - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
- parameter is left blank, an IPv4 address will be assigned.
- type: int
- returned: success
- ipv4_address:
- description:
- - Specifies the obtained IPv4 EIP. The system automatically assigns
- an EIP if you do not specify it.
- type: str
- returned: success
- port_id:
- description:
- - Specifies the port ID. This parameter is returned only when a
- private IP address is bound with the EIP.
- type: str
- returned: success
- shared_bandwidth_id:
- description:
- - Specifies the ID of shared bandwidth.
- type: str
- returned: success
- create_time:
- description:
- - Specifies the time (UTC time) when the EIP was assigned.
- type: str
- returned: success
- ipv6_address:
- description:
- - Specifies the obtained IPv6 EIP.
- type: str
- returned: success
- private_ip_address:
- description:
- - Specifies the private IP address bound with the EIP. This
- parameter is returned only when a private IP address is bound
- with the EIP.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcClientException404, HwcModule,
- are_different_dicts, build_path, get_region, is_empty_value,
- navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='5m', type='str'),
- update=dict(default='5m', type='str'),
- ), default=dict()),
- type=dict(type='str', required=True),
- dedicated_bandwidth=dict(type='dict', options=dict(
- charge_mode=dict(type='str', required=True),
- name=dict(type='str', required=True),
- size=dict(type='int', required=True)
- )),
- enterprise_project_id=dict(type='str'),
- ip_version=dict(type='int'),
- ipv4_address=dict(type='str'),
- port_id=dict(type='str'),
- shared_bandwidth_id=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params['id']:
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- if not module.check_mode:
- update(config)
- changed = True
-
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
- "enterprise_project_id": module.params.get("enterprise_project_id"),
- "ip_version": module.params.get("ip_version"),
- "ipv4_address": module.params.get("ipv4_address"),
- "port_id": module.params.get("port_id"),
- "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
- "type": module.params.get("type"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- obj = async_wait_create(config, r, client, timeout)
- module.params['id'] = navigate_value(obj, ["publicip", "id"])
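
Note: the inline expression 60 * int(...rstrip('m')) above converts timeout strings such as '5m' to seconds. Worth noting that it only understands a minutes suffix; the same conversion made explicit (timeout_seconds is a hypothetical helper, not part of the module):

def timeout_seconds(value):
    """Convert the '5m'/'15m' timeout strings these modules accept into
    seconds; like the inline expression, it only supports minutes."""
    return 60 * int(value.rstrip('m'))

assert timeout_seconds('5m') == 300
assert timeout_seconds('15m') == 900
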
-
-
-def update(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_update_parameters(opts)
- if params:
- r = send_update_request(module, params, client)
- async_wait_update(config, r, client, timeout)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- if module.params["port_id"]:
- module.params["port_id"] = ""
- update(config)
-
- send_delete_request(module, None, client)
-
- url = build_path(module, "publicips/{id}")
-
- def _refresh_status():
- try:
- client.get(url)
- except HwcClientException404:
- return True, "Done"
-
- except Exception:
- return None, ""
-
- return True, "Pending"
-
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- try:
- wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_eip): error "
- "waiting for api(delete) to "
- "be done, error= %s" % str(ex))
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["ip_version"])
- if v:
- query_params.append("ip_version=" + str(v))
-
- v = navigate_value(opts, ["enterprise_project_id"])
- if v:
- query_params.append("enterprise_project_id=" + str(v))
-
- query_link = "?marker={marker}&limit=10"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "publicips" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = expand_create_bandwidth(opts, None)
- if not is_empty_value(v):
- params["bandwidth"] = v
-
- v = navigate_value(opts, ["enterprise_project_id"], None)
- if not is_empty_value(v):
- params["enterprise_project_id"] = v
-
- v = expand_create_publicip(opts, None)
- if not is_empty_value(v):
- params["publicip"] = v
-
- return params
-
-
-def expand_create_bandwidth(d, array_index):
- v = navigate_value(d, ["dedicated_bandwidth"], array_index)
- sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
- if v and sbwid:
- raise Exception("don't input shared_bandwidth_id and "
- "dedicated_bandwidth at the same time")
-
- if not (v or sbwid):
- raise Exception("must input shared_bandwidth_id or "
- "dedicated_bandwidth")
-
- if sbwid:
- return {
- "id": sbwid,
- "share_type": "WHOLE"}
-
- return {
- "charge_mode": v["charge_mode"],
- "name": v["name"],
- "share_type": "PER",
- "size": v["size"]}
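
Note: expand_create_bandwidth above encodes the rule that dedicated_bandwidth and shared_bandwidth_id are mutually exclusive and exactly one is required. The same decision table as a standalone sketch (pick_bandwidth is my name; the returned dicts mirror the shapes built above):

def pick_bandwidth(dedicated, shared_id):
    if dedicated and shared_id:
        raise ValueError("dedicated_bandwidth and shared_bandwidth_id "
                         "are mutually exclusive")
    if shared_id:
        return {"id": shared_id, "share_type": "WHOLE"}
    if dedicated:
        return {"charge_mode": dedicated["charge_mode"],
                "name": dedicated["name"],
                "share_type": "PER",
                "size": dedicated["size"]}
    raise ValueError("one of dedicated_bandwidth or shared_bandwidth_id "
                     "is required")

assert pick_bandwidth(None, "bw-1") == {"id": "bw-1", "share_type": "WHOLE"}
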
-
-
-def expand_create_publicip(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["ipv4_address"], array_index)
- if not is_empty_value(v):
- r["ip_address"] = v
-
- v = navigate_value(d, ["ip_version"], array_index)
- if not is_empty_value(v):
- r["ip_version"] = v
-
- v = navigate_value(d, ["type"], array_index)
- if not is_empty_value(v):
- r["type"] = v
-
- return r
-
-
-def send_create_request(module, params, client):
- url = "publicips"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_eip): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_create(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "publicip_id": ["publicip", "id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "publicips/{publicip_id}", data)
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["publicip", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE", "DOWN"],
- None,
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_eip): error "
- "waiting for api(create) to "
- "be done, error= %s" % str(ex))
-
-
-def build_update_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["ip_version"], None)
- if not is_empty_value(v):
- params["ip_version"] = v
-
- v = navigate_value(opts, ["port_id"], None)
- if v is not None:
- params["port_id"] = v
-
- if not params:
- return params
-
- params = {"publicip": params}
-
- return params
-
-
-def send_update_request(module, params, client):
- url = build_path(module, "publicips/{id}")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_eip): error running "
- "api(update), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_update(config, result, client, timeout):
- module = config.module
-
- url = build_path(module, "publicips/{id}")
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["publicip", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE", "DOWN"],
- None,
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_eip): error "
- "waiting for api(update) to "
- "be done, error= %s" % str(ex))
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "publicips/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_eip): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "publicips/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_eip): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["publicip"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["bandwidth_id"] = body.get("bandwidth_id")
-
- result["bandwidth_name"] = body.get("bandwidth_name")
-
- result["bandwidth_share_type"] = body.get("bandwidth_share_type")
-
- result["bandwidth_size"] = body.get("bandwidth_size")
-
- result["create_time"] = body.get("create_time")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- result["ip_version"] = body.get("ip_version")
-
- result["port_id"] = body.get("port_id")
-
- result["private_ip_address"] = body.get("private_ip_address")
-
- result["public_ip_address"] = body.get("public_ip_address")
-
- result["public_ipv6_address"] = body.get("public_ipv6_address")
-
- result["status"] = body.get("status")
-
- result["tenant_id"] = body.get("tenant_id")
-
- result["type"] = body.get("type")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- if not exclude_output:
- v = navigate_value(response, ["read", "create_time"], array_index)
- r["create_time"] = v
-
- v = r.get("dedicated_bandwidth")
- v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
- r["dedicated_bandwidth"] = v
-
- v = navigate_value(response, ["read", "enterprise_project_id"],
- array_index)
- r["enterprise_project_id"] = v
-
- v = navigate_value(response, ["read", "ip_version"], array_index)
- r["ip_version"] = v
-
- v = navigate_value(response, ["read", "public_ip_address"], array_index)
- r["ipv4_address"] = v
-
- if not exclude_output:
- v = navigate_value(response, ["read", "public_ipv6_address"],
- array_index)
- r["ipv6_address"] = v
-
- v = navigate_value(response, ["read", "port_id"], array_index)
- r["port_id"] = v
-
- if not exclude_output:
- v = navigate_value(response, ["read", "private_ip_address"],
- array_index)
- r["private_ip_address"] = v
-
- v = r.get("shared_bandwidth_id")
- v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
- r["shared_bandwidth_id"] = v
-
- v = navigate_value(response, ["read", "type"], array_index)
- r["type"] = v
-
- return r
-
-
-def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
- v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
- if not (v and v == "PER"):
- return current_value
-
- result = current_value
- if not result:
- result = dict()
-
- if not exclude_output:
- v = navigate_value(d, ["read", "bandwidth_id"], array_index)
- if v is not None:
- result["id"] = v
-
- v = navigate_value(d, ["read", "bandwidth_name"], array_index)
- if v is not None:
- result["name"] = v
-
- v = navigate_value(d, ["read", "bandwidth_size"], array_index)
- if v is not None:
- result["size"] = v
-
- return result if result else current_value
-
-
-def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
- v = navigate_value(d, ["read", "bandwidth_id"], array_index)
-
- v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
-
- return v if (v1 and v1 == "WHOLE") else current_value
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_eip): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["publicips"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = expand_list_bandwidth_id(all_opts, None)
- result["bandwidth_id"] = v
-
- v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
- result["bandwidth_name"] = v
-
- result["bandwidth_share_type"] = None
-
- v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
- result["bandwidth_size"] = v
-
- result["create_time"] = None
-
- v = navigate_value(all_opts, ["enterprise_project_id"], None)
- result["enterprise_project_id"] = v
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["ip_version"], None)
- result["ip_version"] = v
-
- v = navigate_value(all_opts, ["port_id"], None)
- result["port_id"] = v
-
- result["private_ip_address"] = None
-
- v = navigate_value(all_opts, ["ipv4_address"], None)
- result["public_ip_address"] = v
-
- result["public_ipv6_address"] = None
-
- result["status"] = None
-
- result["tenant_id"] = None
-
- v = navigate_value(all_opts, ["type"], None)
- result["type"] = v
-
- return result
-
-
-def expand_list_bandwidth_id(d, array_index):
- v = navigate_value(d, ["dedicated_bandwidth"], array_index)
- sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
- if v and sbwid:
- raise Exception("don't input shared_bandwidth_id and "
- "dedicated_bandwidth at the same time")
-
- return sbwid
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["bandwidth_id"] = body.get("bandwidth_id")
-
- result["bandwidth_name"] = body.get("bandwidth_name")
-
- result["bandwidth_share_type"] = body.get("bandwidth_share_type")
-
- result["bandwidth_size"] = body.get("bandwidth_size")
-
- result["create_time"] = body.get("create_time")
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- result["ip_version"] = body.get("ip_version")
-
- result["port_id"] = body.get("port_id")
-
- result["private_ip_address"] = body.get("private_ip_address")
-
- result["public_ip_address"] = body.get("public_ip_address")
-
- result["public_ipv6_address"] = body.get("public_ipv6_address")
-
- result["status"] = body.get("status")
-
- result["tenant_id"] = body.get("tenant_id")
-
- result["type"] = body.get("type")
-
- return result
-
-
-if __name__ == '__main__':
- main()
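
Note: search_resource above builds an "identity object" whose server-generated fields are set to None and filters the listing with are_different_dicts. Assuming are_different_dicts effectively treats those None entries as wildcards (which is what makes the filter useful), the matching idea reduces to this sketch:

def matches(identity, item):
    """None values in `identity` act as wildcards; every other entry must
    compare equal in `item`."""
    return all(item.get(k) == v for k, v in identity.items()
               if v is not None)

assert matches({"type": "5_bgp", "status": None},
               {"type": "5_bgp", "status": "ACTIVE"})
assert not matches({"type": "5_sbgp"}, {"type": "5_bgp"})
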
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
deleted file mode 100644
index a4d5921b..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_peering_connect.py
+++ /dev/null
@@ -1,691 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_peering_connect
-description:
- - VPC peering management.
-short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeout for the create operation.
- type: str
- default: '15m'
- local_vpc_id:
- description:
- - Specifies the ID of local VPC.
- type: str
- required: true
- name:
- description:
- - Specifies the name of the VPC peering connection. The value can
- contain 1 to 64 characters.
- type: str
- required: true
- peering_vpc:
- description:
- - Specifies information about the peering VPC.
- type: dict
- required: true
- suboptions:
- vpc_id:
- description:
- - Specifies the ID of peering VPC.
- type: str
- required: true
- project_id:
- description:
- - Specifies the ID of the project which the peering vpc
- belongs to.
- type: str
- required: false
- description:
- description:
- - The description of the VPC peering connection.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a vpc peering connection
-- name: Create a local vpc
- hwc_network_vpc:
- cidr: "192.168.0.0/16"
- name: "ansible_network_vpc_test_local"
- register: vpc1
-- name: Create a peering vpc
- hwc_network_vpc:
- cidr: "192.168.0.0/16"
- name: "ansible_network_vpc_test_peering"
- register: vpc2
-- name: Create a peering connect
- community.general.hwc_vpc_peering_connect:
- local_vpc_id: "{{ vpc1.id }}"
- name: "ansible_network_peering_test"
- peering_vpc:
- vpc_id: "{{ vpc2.id }}"
-'''
-
-RETURN = '''
- local_vpc_id:
- description:
- - Specifies the ID of local VPC.
- type: str
- returned: success
- name:
- description:
- - Specifies the name of the VPC peering connection. The value can
- contain 1 to 64 characters.
- type: str
- returned: success
- peering_vpc:
- description:
- - Specifies information about the peering VPC.
- type: dict
- returned: success
- contains:
- vpc_id:
- description:
- - Specifies the ID of peering VPC.
- type: str
- returned: success
- project_id:
- description:
- - Specifies the ID of the project which the peering vpc
- belongs to.
- type: str
- returned: success
- description:
- description:
- - The description of the VPC peering connection.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcClientException404, HwcModule,
- are_different_dicts, build_path, get_region, is_empty_value,
- navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='15m', type='str'),
- ), default=dict()),
- local_vpc_id=dict(type='str', required=True),
- name=dict(type='str', required=True),
- peering_vpc=dict(type='dict', required=True, options=dict(
- vpc_id=dict(type='str', required=True),
- project_id=dict(type='str')
- )),
- description=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params['id']:
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- if not module.check_mode:
- update(config)
- changed = True
-
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "description": module.params.get("description"),
- "local_vpc_id": module.params.get("local_vpc_id"),
- "name": module.params.get("name"),
- "peering_vpc": module.params.get("peering_vpc"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- obj = async_wait_create(config, r, client, timeout)
- module.params['id'] = navigate_value(obj, ["peering", "id"])
-
-
-def update(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
- opts = user_input_parameters(module)
-
- params = build_update_parameters(opts)
- if params:
- send_update_request(module, params, client)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
-
- send_delete_request(module, None, client)
-
- url = build_path(module, "v2.0/vpc/peerings/{id}")
-
- def _refresh_status():
- try:
- client.get(url)
- except HwcClientException404:
- return True, "Done"
-
- except Exception:
- return None, ""
-
- return True, "Pending"
-
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- try:
- wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_peering_connect): error "
- "waiting for api(delete) to "
- "be done, error= %s" % str(ex))
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "network", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["local_vpc_id"])
- if v:
- query_params.append("vpc_id=" + str(v))
-
- v = navigate_value(opts, ["name"])
- if v:
- query_params.append("name=" + str(v))
-
- query_link = "?marker={marker}&limit=10"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "v2.0/vpc/peerings" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = expand_create_accept_vpc_info(opts, None)
- if not is_empty_value(v):
- params["accept_vpc_info"] = v
-
- v = navigate_value(opts, ["description"], None)
- if not is_empty_value(v):
- params["description"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = expand_create_request_vpc_info(opts, None)
- if not is_empty_value(v):
- params["request_vpc_info"] = v
-
- if not params:
- return params
-
- params = {"peering": params}
-
- return params
-
-
-def expand_create_accept_vpc_info(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
- if not is_empty_value(v):
- r["tenant_id"] = v
-
- v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
- if not is_empty_value(v):
- r["vpc_id"] = v
-
- return r
-
-
-def expand_create_request_vpc_info(d, array_index):
- r = dict()
-
- r["tenant_id"] = ""
-
- v = navigate_value(d, ["local_vpc_id"], array_index)
- if not is_empty_value(v):
- r["vpc_id"] = v
-
- return r
-
-
-def send_create_request(module, params, client):
- url = "v2.0/vpc/peerings"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_peering_connect): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_create(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "peering_id": ["peering", "id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
-
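- # Poll until the peering reaches ACTIVE; PENDING_ACCEPTANCE keeps the
- # wait alive, and read errors are treated as a transient unknown status.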
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["peering", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE"],
- ["PENDING_ACCEPTANCE"],
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_peering_connect): error "
- "waiting for api(create) to "
- "be done, error= %s" % str(ex))
-
-
-def build_update_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["description"], None)
- if not is_empty_value(v):
- params["description"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- if not params:
- return params
-
- params = {"peering": params}
-
- return params
-
-
-def send_update_request(module, params, client):
- url = build_path(module, "v2.0/vpc/peerings/{id}")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_peering_connect): error running "
- "api(update), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "v2.0/vpc/peerings/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_peering_connect): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "v2.0/vpc/peerings/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_peering_connect): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["peering"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
- result["accept_vpc_info"] = v
-
- result["description"] = body.get("description")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
- result["request_vpc_info"] = v
-
- result["status"] = body.get("status")
-
- return result
-
-
-def fill_read_resp_accept_vpc_info(value):
- if not value:
- return None
-
- result = dict()
-
- result["tenant_id"] = value.get("tenant_id")
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-def fill_read_resp_request_vpc_info(value):
- if not value:
- return None
-
- result = dict()
-
- result["tenant_id"] = value.get("tenant_id")
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "description"], array_index)
- r["description"] = v
-
- v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
- array_index)
- r["local_vpc_id"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- v = r.get("peering_vpc")
- v = flatten_peering_vpc(response, array_index, v, exclude_output)
- r["peering_vpc"] = v
-
- return r
-
-
-def flatten_peering_vpc(d, array_index, current_value, exclude_output):
- result = current_value
- has_init_value = True
- if not result:
- result = dict()
- has_init_value = False
-
- v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
- array_index)
- result["project_id"] = v
-
- v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
- result["vpc_id"] = v
-
- if has_init_value:
- return result
-
- for v in result.values():
- if v is not None:
- return result
- return current_value
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_peering_connect): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["peerings"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = expand_list_accept_vpc_info(all_opts, None)
- result["accept_vpc_info"] = v
-
- v = navigate_value(all_opts, ["description"], None)
- result["description"] = v
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["name"], None)
- result["name"] = v
-
- v = expand_list_request_vpc_info(all_opts, None)
- result["request_vpc_info"] = v
-
- result["status"] = None
-
- return result
-
-
-def expand_list_accept_vpc_info(d, array_index):
- r = dict()
-
- v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
- r["tenant_id"] = v
-
- v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
- r["vpc_id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def expand_list_request_vpc_info(d, array_index):
- r = dict()
-
- r["tenant_id"] = None
-
- v = navigate_value(d, ["local_vpc_id"], array_index)
- r["vpc_id"] = v
-
- for v in r.values():
- if v is not None:
- return r
- return None
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
- result["accept_vpc_info"] = v
-
- result["description"] = body.get("description")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
- result["request_vpc_info"] = v
-
- result["status"] = body.get("status")
-
- return result
-
-
-def fill_list_resp_accept_vpc_info(value):
- if not value:
- return None
-
- result = dict()
-
- result["tenant_id"] = value.get("tenant_id")
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-def fill_list_resp_request_vpc_info(value):
- if not value:
- return None
-
- result = dict()
-
- result["tenant_id"] = value.get("tenant_id")
-
- result["vpc_id"] = value.get("vpc_id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
deleted file mode 100644
index cf0718f5..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_port.py
+++ /dev/null
@@ -1,1160 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_port
-description:
- - vpc port management.
-short_description: Creates a resource of Vpc/Port in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeouts for create operation.
- type: str
- default: '15m'
- subnet_id:
- description:
- - Specifies the ID of the subnet to which the port belongs.
- type: str
- required: true
- admin_state_up:
- description:
- - Specifies the administrative state of the port.
- type: bool
- required: false
- allowed_address_pairs:
- description:
- - Specifies a set of zero or more allowed address pairs.
- required: false
- type: list
- elements: dict
- suboptions:
- ip_address:
- description:
- - Specifies the IP address. It cannot be set to 0.0.0.0.
- Configure an independent security group for the port if a
- large CIDR block (subnet mask less than 24) is configured
- for parameter allowed_address_pairs.
- type: str
- required: false
- mac_address:
- description:
- - Specifies the MAC address.
- type: str
- required: false
- extra_dhcp_opts:
- description:
- - Specifies the extended option of DHCP.
- type: list
- elements: dict
- required: false
- suboptions:
- name:
- description:
- - Specifies the option name.
- type: str
- required: false
- value:
- description:
- - Specifies the option value.
- type: str
- required: false
- ip_address:
- description:
- - Specifies the port IP address.
- type: str
- required: false
- name:
- description:
- - Specifies the port name. The value can contain no more than 255
- characters.
- type: str
- required: false
- security_groups:
- description:
- - Specifies the ID of the security group.
- type: list
- elements: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a port
-- name: Create vpc
- hwc_network_vpc:
- cidr: "192.168.100.0/24"
- name: "ansible_network_vpc_test"
- register: vpc
-- name: Create subnet
- hwc_vpc_subnet:
- gateway_ip: "192.168.100.32"
- name: "ansible_network_subnet_test"
- dhcp_enable: True
- vpc_id: "{{ vpc.id }}"
- cidr: "192.168.100.0/26"
- register: subnet
-- name: Create a port
- community.general.hwc_vpc_port:
- subnet_id: "{{ subnet.id }}"
- ip_address: "192.168.100.33"
-'''
-
-RETURN = '''
- subnet_id:
- description:
- - Specifies the ID of the subnet to which the port belongs.
- type: str
- returned: success
- admin_state_up:
- description:
- - Specifies the administrative state of the port.
- type: bool
- returned: success
- allowed_address_pairs:
- description:
- - Specifies a set of zero or more allowed address pairs.
- type: list
- returned: success
- contains:
- ip_address:
- description:
- - Specifies the IP address. It cannot be set to 0.0.0.0.
- Configure an independent security group for the port if a
- large CIDR block (subnet mask less than 24) is configured
- for parameter allowed_address_pairs.
- type: str
- returned: success
- mac_address:
- description:
- - Specifies the MAC address.
- type: str
- returned: success
- extra_dhcp_opts:
- description:
- - Specifies the extended option of DHCP.
- type: list
- returned: success
- contains:
- name:
- description:
- - Specifies the option name.
- type: str
- returned: success
- value:
- description:
- - Specifies the option value.
- type: str
- returned: success
- ip_address:
- description:
- - Specifies the port IP address.
- type: str
- returned: success
- name:
- description:
- - Specifies the port name. The value can contain no more than 255
- characters.
- type: str
- returned: success
- security_groups:
- description:
- - Specifies the ID of the security group.
- type: list
- returned: success
- mac_address:
- description:
- - Specifies the port MAC address.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcClientException404, HwcModule,
- are_different_dicts, build_path, get_region, is_empty_value,
- navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='15m', type='str'),
- ), default=dict()),
- subnet_id=dict(type='str', required=True),
- admin_state_up=dict(type='bool'),
- allowed_address_pairs=dict(
- type='list', elements='dict',
- options=dict(
- ip_address=dict(type='str'),
- mac_address=dict(type='str')
- ),
- ),
- extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
- name=dict(type='str'),
- value=dict(type='str')
- )),
- ip_address=dict(type='str'),
- name=dict(type='str'),
- security_groups=dict(type='list', elements='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
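- # Resolve the resource first (by explicit id, or by searching with the
- # given options), then converge on the desired state; check mode only
- # reports whether a change would happen.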
- try:
- resource = None
- if module.params['id']:
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- if not module.check_mode:
- update(config)
- changed = True
-
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "admin_state_up": module.params.get("admin_state_up"),
- "allowed_address_pairs": module.params.get("allowed_address_pairs"),
- "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
- "ip_address": module.params.get("ip_address"),
- "name": module.params.get("name"),
- "security_groups": module.params.get("security_groups"),
- "subnet_id": module.params.get("subnet_id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- obj = async_wait_create(config, r, client, timeout)
- module.params['id'] = navigate_value(obj, ["port", "id"])
-
-
-def update(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
-
- params = build_update_parameters(opts)
- if params:
- send_update_request(module, params, client)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- send_delete_request(module, None, client)
-
- url = build_path(module, "ports/{id}")
-
- def _refresh_status():
- try:
- client.get(url)
- except HwcClientException404:
- return True, "Done"
-
- except Exception:
- return None, ""
-
- return True, "Pending"
-
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- try:
- wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_port): error "
- "waiting for api(delete) to "
- "be done, error= %s" % str(ex))
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- array_index = {
- "read.fixed_ips": 0,
- }
-
- return update_properties(module, res, array_index, exclude_output)
-
-
-def _build_query_link(opts):
- query_params = []
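- # Map module options onto the list API's query string; note that the
- # subnet_id option is exposed by the port API as network_id.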
-
- v = navigate_value(opts, ["subnet_id"])
- if v:
- query_params.append("network_id=" + str(v))
-
- v = navigate_value(opts, ["name"])
- if v:
- query_params.append("name=" + str(v))
-
- v = navigate_value(opts, ["admin_state_up"])
- if v:
- query_params.append("admin_state_up=" + str(v))
-
- query_link = "?marker={marker}&limit=10"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "ports" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["admin_state_up"], None)
- if not is_empty_value(v):
- params["admin_state_up"] = v
-
- v = expand_create_allowed_address_pairs(opts, None)
- if not is_empty_value(v):
- params["allowed_address_pairs"] = v
-
- v = expand_create_extra_dhcp_opts(opts, None)
- if not is_empty_value(v):
- params["extra_dhcp_opts"] = v
-
- v = expand_create_fixed_ips(opts, None)
- if not is_empty_value(v):
- params["fixed_ips"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = navigate_value(opts, ["subnet_id"], None)
- if not is_empty_value(v):
- params["network_id"] = v
-
- v = navigate_value(opts, ["security_groups"], None)
- if not is_empty_value(v):
- params["security_groups"] = v
-
- if not params:
- return params
-
- params = {"port": params}
-
- return params
-
-
-def expand_create_allowed_address_pairs(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["allowed_address_pairs"],
- new_array_index)
- if not v:
- return req
- n = len(v)
- for i in range(n):
- new_array_index["allowed_address_pairs"] = i
- transformed = dict()
-
- v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
- new_array_index)
- if not is_empty_value(v):
- transformed["ip_address"] = v
-
- v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
- new_array_index)
- if not is_empty_value(v):
- transformed["mac_address"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def expand_create_extra_dhcp_opts(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["extra_dhcp_opts"],
- new_array_index)
- if not v:
- return req
- n = len(v)
- for i in range(n):
- new_array_index["extra_dhcp_opts"] = i
- transformed = dict()
-
- v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
- if not is_empty_value(v):
- transformed["opt_name"] = v
-
- v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
- if not is_empty_value(v):
- transformed["opt_value"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def expand_create_fixed_ips(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
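- # Only one fixed IP can be supplied via ip_address, so the loop runs
- # exactly once; the range form mirrors the shape of the other expanders.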
- n = 1
- for i in range(n):
- transformed = dict()
-
- v = navigate_value(d, ["ip_address"], new_array_index)
- if not is_empty_value(v):
- transformed["ip_address"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def send_create_request(module, params, client):
- url = "ports"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_port): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_create(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "port_id": ["port", "id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "ports/{port_id}", data)
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["port", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE", "DOWN"],
- ["BUILD"],
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_port): error "
- "waiting for api(create) to "
- "be done, error= %s" % str(ex))
-
-
-def build_update_parameters(opts):
- params = dict()
-
- v = expand_update_allowed_address_pairs(opts, None)
- if v is not None:
- params["allowed_address_pairs"] = v
-
- v = expand_update_extra_dhcp_opts(opts, None)
- if v is not None:
- params["extra_dhcp_opts"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = navigate_value(opts, ["security_groups"], None)
- if not is_empty_value(v):
- params["security_groups"] = v
-
- if not params:
- return params
-
- params = {"port": params}
-
- return params
-
-
-def expand_update_allowed_address_pairs(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["allowed_address_pairs"],
- new_array_index)
- if not v:
- return req
- n = len(v)
- for i in range(n):
- new_array_index["allowed_address_pairs"] = i
- transformed = dict()
-
- v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
- new_array_index)
- if not is_empty_value(v):
- transformed["ip_address"] = v
-
- v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
- new_array_index)
- if not is_empty_value(v):
- transformed["mac_address"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def expand_update_extra_dhcp_opts(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["extra_dhcp_opts"],
- new_array_index)
- if not v:
- return req
- n = len(v)
- for i in range(n):
- new_array_index["extra_dhcp_opts"] = i
- transformed = dict()
-
- v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
- if not is_empty_value(v):
- transformed["opt_name"] = v
-
- v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
- if not is_empty_value(v):
- transformed["opt_value"] = v
-
- if transformed:
- req.append(transformed)
-
- return req
-
-
-def send_update_request(module, params, client):
- url = build_path(module, "ports/{id}")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_port): error running "
- "api(update), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "ports/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_port): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "ports/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_port): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["port"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["admin_state_up"] = body.get("admin_state_up")
-
- v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
- result["allowed_address_pairs"] = v
-
- result["binding_host_id"] = body.get("binding_host_id")
-
- result["binding_vnic_type"] = body.get("binding_vnic_type")
-
- result["device_id"] = body.get("device_id")
-
- result["device_owner"] = body.get("device_owner")
-
- result["dns_name"] = body.get("dns_name")
-
- v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
- result["extra_dhcp_opts"] = v
-
- v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
- result["fixed_ips"] = v
-
- result["id"] = body.get("id")
-
- result["mac_address"] = body.get("mac_address")
-
- result["name"] = body.get("name")
-
- result["network_id"] = body.get("network_id")
-
- result["security_groups"] = body.get("security_groups")
-
- result["status"] = body.get("status")
-
- result["tenant_id"] = body.get("tenant_id")
-
- return result
-
-
-def fill_read_resp_allowed_address_pairs(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["ip_address"] = item.get("ip_address")
-
- val["mac_address"] = item.get("mac_address")
-
- result.append(val)
-
- return result
-
-
-def fill_read_resp_extra_dhcp_opts(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["opt_name"] = item.get("opt_name")
-
- val["opt_value"] = item.get("opt_value")
-
- result.append(val)
-
- return result
-
-
-def fill_read_resp_fixed_ips(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["ip_address"] = item.get("ip_address")
-
- result.append(val)
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "admin_state_up"], array_index)
- r["admin_state_up"] = v
-
- v = r.get("allowed_address_pairs")
- v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
- r["allowed_address_pairs"] = v
-
- v = r.get("extra_dhcp_opts")
- v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
- r["extra_dhcp_opts"] = v
-
- v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
- array_index)
- r["ip_address"] = v
-
- if not exclude_output:
- v = navigate_value(response, ["read", "mac_address"], array_index)
- r["mac_address"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- v = navigate_value(response, ["read", "security_groups"], array_index)
- r["security_groups"] = v
-
- v = navigate_value(response, ["read", "network_id"], array_index)
- r["subnet_id"] = v
-
- return r
-
-
-def flatten_allowed_address_pairs(d, array_index,
- current_value, exclude_output):
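- # Merge freshly read pairs into the current value in place; new entries
- # are appended only when at least one of their fields is non-empty.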
- n = 0
- result = current_value
- has_init_value = True
- if result:
- n = len(result)
- else:
- has_init_value = False
- result = []
- v = navigate_value(d, ["read", "allowed_address_pairs"],
- array_index)
- if not v:
- return current_value
- n = len(v)
-
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- for i in range(n):
- new_array_index["read.allowed_address_pairs"] = i
-
- val = dict()
- if len(result) >= (i + 1) and result[i]:
- val = result[i]
-
- v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
- new_array_index)
- val["ip_address"] = v
-
- v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
- new_array_index)
- val["mac_address"] = v
-
- if len(result) >= (i + 1):
- result[i] = val
- else:
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if (has_init_value or result) else current_value
-
-
-def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
- n = 0
- result = current_value
- has_init_value = True
- if result:
- n = len(result)
- else:
- has_init_value = False
- result = []
- v = navigate_value(d, ["read", "extra_dhcp_opts"],
- array_index)
- if not v:
- return current_value
- n = len(v)
-
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- for i in range(n):
- new_array_index["read.extra_dhcp_opts"] = i
-
- val = dict()
- if len(result) >= (i + 1) and result[i]:
- val = result[i]
-
- v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
- new_array_index)
- val["name"] = v
-
- v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
- new_array_index)
- val["value"] = v
-
- if len(result) >= (i + 1):
- result[i] = val
- else:
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if (has_init_value or result) else current_value
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_port): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["ports"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = navigate_value(all_opts, ["admin_state_up"], None)
- result["admin_state_up"] = v
-
- v = expand_list_allowed_address_pairs(all_opts, None)
- result["allowed_address_pairs"] = v
-
- result["binding_host_id"] = None
-
- result["binding_vnic_type"] = None
-
- result["device_id"] = None
-
- result["device_owner"] = None
-
- result["dns_name"] = None
-
- v = expand_list_extra_dhcp_opts(all_opts, None)
- result["extra_dhcp_opts"] = v
-
- v = expand_list_fixed_ips(all_opts, None)
- result["fixed_ips"] = v
-
- result["id"] = None
-
- result["mac_address"] = None
-
- v = navigate_value(all_opts, ["name"], None)
- result["name"] = v
-
- v = navigate_value(all_opts, ["subnet_id"], None)
- result["network_id"] = v
-
- v = navigate_value(all_opts, ["security_groups"], None)
- result["security_groups"] = v
-
- result["status"] = None
-
- result["tenant_id"] = None
-
- return result
-
-
-def expand_list_allowed_address_pairs(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["allowed_address_pairs"],
- new_array_index)
-
- n = len(v) if v else 1
- for i in range(n):
- new_array_index["allowed_address_pairs"] = i
- transformed = dict()
-
- v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
- new_array_index)
- transformed["ip_address"] = v
-
- v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
- new_array_index)
- transformed["mac_address"] = v
-
- for v in transformed.values():
- if v is not None:
- req.append(transformed)
- break
-
- return req if req else None
-
-
-def expand_list_extra_dhcp_opts(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- v = navigate_value(d, ["extra_dhcp_opts"],
- new_array_index)
-
- n = len(v) if v else 1
- for i in range(n):
- new_array_index["extra_dhcp_opts"] = i
- transformed = dict()
-
- v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
- transformed["opt_name"] = v
-
- v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
- transformed["opt_value"] = v
-
- for v in transformed.values():
- if v is not None:
- req.append(transformed)
- break
-
- return req if req else None
-
-
-def expand_list_fixed_ips(d, array_index):
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- req = []
-
- n = 1
- for i in range(n):
- transformed = dict()
-
- v = navigate_value(d, ["ip_address"], new_array_index)
- transformed["ip_address"] = v
-
- for v in transformed.values():
- if v is not None:
- req.append(transformed)
- break
-
- return req if req else None
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["admin_state_up"] = body.get("admin_state_up")
-
- v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
- result["allowed_address_pairs"] = v
-
- result["binding_host_id"] = body.get("binding_host_id")
-
- result["binding_vnic_type"] = body.get("binding_vnic_type")
-
- result["device_id"] = body.get("device_id")
-
- result["device_owner"] = body.get("device_owner")
-
- result["dns_name"] = body.get("dns_name")
-
- v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
- result["extra_dhcp_opts"] = v
-
- v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
- result["fixed_ips"] = v
-
- result["id"] = body.get("id")
-
- result["mac_address"] = body.get("mac_address")
-
- result["name"] = body.get("name")
-
- result["network_id"] = body.get("network_id")
-
- result["security_groups"] = body.get("security_groups")
-
- result["status"] = body.get("status")
-
- result["tenant_id"] = body.get("tenant_id")
-
- return result
-
-
-def fill_list_resp_allowed_address_pairs(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["ip_address"] = item.get("ip_address")
-
- val["mac_address"] = item.get("mac_address")
-
- result.append(val)
-
- return result
-
-
-def fill_list_resp_extra_dhcp_opts(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["opt_name"] = item.get("opt_name")
-
- val["opt_value"] = item.get("opt_value")
-
- result.append(val)
-
- return result
-
-
-def fill_list_resp_fixed_ips(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["ip_address"] = item.get("ip_address")
-
- result.append(val)
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
deleted file mode 100644
index 901755f3..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_private_ip.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_private_ip
-description:
- - vpc private ip management.
-short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
-notes:
- - If I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private ip selection.
- - I(subnet_id) and I(ip_address) are used for private ip selection. If more than one private ip with these options exists, execution is aborted.
- - No parameter supports updating. If any option is changed, the module will create a new resource.
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- subnet_id:
- description:
- - Specifies the ID of the subnet from which IP addresses are
- assigned. Cannot be changed after creating the private ip.
- type: str
- required: true
- ip_address:
- description:
- - Specifies the target IP address. The value can be an available IP
- address in the subnet. If it is not specified, the system
- automatically assigns an IP address. Cannot be changed after
- creating the private ip.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a private ip
-- name: Create vpc
- hwc_network_vpc:
- cidr: "192.168.100.0/24"
- name: "ansible_network_vpc_test"
- register: vpc
-- name: Create subnet
- hwc_vpc_subnet:
- gateway_ip: "192.168.100.32"
- name: "ansible_network_subnet_test"
- dhcp_enable: True
- vpc_id: "{{ vpc.id }}"
- cidr: "192.168.100.0/26"
- register: subnet
-- name: Create a private ip
- community.general.hwc_vpc_private_ip:
- subnet_id: "{{ subnet.id }}"
- ip_address: "192.168.100.33"
-'''
-
-RETURN = '''
- subnet_id:
- description:
- - Specifies the ID of the subnet from which IP addresses are
- assigned.
- type: str
- returned: success
- ip_address:
- description:
- - Specifies the target IP address. The value can be an available IP
- address in the subnet. If it is not specified, the system
- automatically assigns an IP address.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- subnet_id=dict(type='str', required=True),
- ip_address=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params['id']:
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
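- # Private IPs cannot be updated in place, so any drift between the
- # requested options and the existing resource is a hard error.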
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- raise Exception(
- "Cannot change option from (%s) to (%s)of an"
- " existing resource.(%s)" % (current, expect, module.params.get('id')))
-
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "ip_address": module.params.get("ip_address"),
- "subnet_id": module.params.get("subnet_id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
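- # The create API responds with a list under "privateips"; since a single
- # ip was requested, the first element's id identifies the new resource.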
- module.params['id'] = navigate_value(r, ["privateips", "id"],
- {"privateips": 0})
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- send_delete_request(module, None, client)
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_link = "?marker={marker}&limit=10"
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["ip_address"], None)
- if not is_empty_value(v):
- params["ip_address"] = v
-
- v = navigate_value(opts, ["subnet_id"], None)
- if not is_empty_value(v):
- params["subnet_id"] = v
-
- if not params:
- return params
-
- params = {"privateips": [params]}
-
- return params
-
-
-def send_create_request(module, params, client):
- url = "privateips"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_private_ip): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "privateips/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_private_ip): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "privateips/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_private_ip): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["privateip"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["id"] = body.get("id")
-
- result["ip_address"] = body.get("ip_address")
-
- result["subnet_id"] = body.get("subnet_id")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "ip_address"], array_index)
- r["ip_address"] = v
-
- v = navigate_value(response, ["read", "subnet_id"], array_index)
- r["subnet_id"] = v
-
- return r
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_private_ip): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["privateips"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["ip_address"], None)
- result["ip_address"] = v
-
- v = navigate_value(all_opts, ["subnet_id"], None)
- result["subnet_id"] = v
-
- return result
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["id"] = body.get("id")
-
- result["ip_address"] = body.get("ip_address")
-
- result["subnet_id"] = body.get("subnet_id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
deleted file mode 100644
index 31829dc6..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_route.py
+++ /dev/null
@@ -1,437 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_route
-description:
- - vpc route management.
-short_description: Creates a resource of Vpc/Route in Huawei Cloud
-notes:
- - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
- - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted.
- - No parameter supports updating. If any option is changed, the module will create a new resource.
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- destination:
- description:
- - Specifies the destination IP address or CIDR block.
- type: str
- required: true
- next_hop:
- description:
- - Specifies the next hop. The value is a VPC peering connection ID.
- type: str
- required: true
- vpc_id:
- description:
- - Specifies the VPC ID to which the route is added.
- type: str
- required: true
- type:
- description:
- - Specifies the type of route.
- type: str
- required: false
- default: 'peering'
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a peering connect
-- name: Create a local vpc
- hwc_network_vpc:
- cidr: "192.168.0.0/16"
- name: "ansible_network_vpc_test_local"
- register: vpc1
-- name: Create a peering vpc
- hwc_network_vpc:
- cidr: "192.168.0.0/16"
- name: "ansible_network_vpc_test_peering"
- register: vpc2
-- name: Create a peering connect
- hwc_vpc_peering_connect:
- local_vpc_id: "{{ vpc1.id }}"
- name: "ansible_network_peering_test"
- filters:
- - "name"
- peering_vpc:
- vpc_id: "{{ vpc2.id }}"
- register: connect
-- name: Create a route
- community.general.hwc_vpc_route:
- vpc_id: "{{ vpc1.id }}"
- destination: "192.168.0.0/16"
- next_hop: "{{ connect.id }}"
-'''
-
-RETURN = '''
- id:
- description:
- - UUID of the route.
- type: str
- returned: success
- destination:
- description:
- - Specifies the destination IP address or CIDR block.
- type: str
- returned: success
- next_hop:
- description:
- - Specifies the next hop. The value is a VPC peering connection ID.
- type: str
- returned: success
- vpc_id:
- description:
- - Specifies the VPC ID to which the route is added.
- type: str
- returned: success
- type:
- description:
- - Specifies the type of route.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- destination=dict(type='str', required=True),
- next_hop=dict(type='str', required=True),
- vpc_id=dict(type='str', required=True),
- type=dict(type='str', default='peering'),
- id=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
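- # An explicit id takes precedence; otherwise the route is looked up by
- # destination, vpc_id, type and next_hop, and ambiguity is an error.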
- if module.params.get("id"):
- resource = get_resource_by_id(config)
- if module.params['state'] == 'present':
- opts = user_input_parameters(module)
- if are_different_dicts(resource, opts):
- raise Exception(
- "Cannot change option from (%s) to (%s) for an"
- " existing route.(%s)" % (resource, opts,
- config.module.params.get(
- 'id')))
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = update_properties(module, {"read": v[0]}, None)
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- resource = create(config)
- changed = True
-
- result = resource
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "destination": module.params.get("destination"),
- "next_hop": module.params.get("next_hop"),
- "type": module.params.get("type"),
- "vpc_id": module.params.get("vpc_id"),
- "id": module.params.get("id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- module.params['id'] = navigate_value(r, ["route", "id"])
-
- result = update_properties(module, {"read": fill_resp_body(r)}, None)
- return result
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
-
- send_delete_request(module, None, client)
-
-
-def get_resource_by_id(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "network", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_resp_body(r)
-
- result = update_properties(module, res, None, exclude_output)
- return result
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["type"])
- if v:
- query_params.append("type=" + str(v))
-
- v = navigate_value(opts, ["destination"])
- if v:
- query_params.append("destination=" + str(v))
-
- v = navigate_value(opts, ["vpc_id"])
- if v:
- query_params.append("vpc_id=" + str(v))
-
- query_link = "?marker={marker}&limit=10"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "network", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "v2.0/vpc/routes" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["destination"], None)
- if not is_empty_value(v):
- params["destination"] = v
-
- v = navigate_value(opts, ["next_hop"], None)
- if not is_empty_value(v):
- params["nexthop"] = v
-
- v = navigate_value(opts, ["type"], None)
- if not is_empty_value(v):
- params["type"] = v
-
- v = navigate_value(opts, ["vpc_id"], None)
- if not is_empty_value(v):
- params["vpc_id"] = v
-
- if not params:
- return params
-
- params = {"route": params}
-
- return params
-
-
-def send_create_request(module, params, client):
- url = "v2.0/vpc/routes"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_route): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "v2.0/vpc/routes/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_route): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "v2.0/vpc/routes/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_route): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["route"], None)
-
-
-def fill_resp_body(body):
- result = dict()
-
- result["destination"] = body.get("destination")
-
- result["id"] = body.get("id")
-
- result["nexthop"] = body.get("nexthop")
-
- result["type"] = body.get("type")
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "destination"], array_index)
- r["destination"] = v
-
- v = navigate_value(response, ["read", "nexthop"], array_index)
- r["next_hop"] = v
-
- v = navigate_value(response, ["read", "type"], array_index)
- r["type"] = v
-
- v = navigate_value(response, ["read", "vpc_id"], array_index)
- r["vpc_id"] = v
-
- v = navigate_value(response, ["read", "id"], array_index)
- r["id"] = v
-
- return r
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_route): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["routes"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = navigate_value(all_opts, ["destination"], None)
- result["destination"] = v
-
- v = navigate_value(all_opts, ["id"], None)
- result["id"] = v
-
- v = navigate_value(all_opts, ["next_hop"], None)
- result["nexthop"] = v
-
- v = navigate_value(all_opts, ["type"], None)
- result["type"] = v
-
- v = navigate_value(all_opts, ["vpc_id"], None)
- result["vpc_id"] = v
-
- return result
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["destination"] = body.get("destination")
-
- result["id"] = body.get("id")
-
- result["nexthop"] = body.get("nexthop")
-
- result["type"] = body.get("type")
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
deleted file mode 100644
index 5a1dfe70..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group.py
+++ /dev/null
@@ -1,644 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_security_group
-description:
- - vpc security group management.
-short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
-notes:
- - If I(id) option is provided, it takes precedence over I(name),
- I(enterprise_project_id) and I(vpc_id) for security group selection.
- - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
- group selection. If more than one security group with these options exists,
- execution is aborted.
- - No parameter supports updating. If any option is changed, the module
- will create a new resource.
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- name:
- description:
- - Specifies the security group name. The value is a string of 1 to
- 64 characters that can contain letters, digits, underscores C(_),
- hyphens C(-), and periods C(.).
- type: str
- required: true
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID. When creating a security
- group, associate the enterprise project ID with the security
- group.
- type: str
- required: false
- vpc_id:
- description:
- - Specifies the resource ID of the VPC to which the security group
- belongs.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a security group
-- name: Create a security group
- community.general.hwc_vpc_security_group:
- name: "ansible_network_security_group_test"
-'''
-
-RETURN = '''
- name:
- description:
- - Specifies the security group name. The value is a string of 1 to
- 64 characters that can contain letters, digits, underscores C(_),
- hyphens C(-), and periods C(.).
- type: str
- returned: success
- enterprise_project_id:
- description:
- - Specifies the enterprise project ID. When creating a security
- group, associate the enterprise project ID with the security
- group.
- type: str
- returned: success
- vpc_id:
- description:
- - Specifies the resource ID of the VPC to which the security group
- belongs.
- type: str
- returned: success
- rules:
- description:
- - Specifies the security group rule, which ensures that resources
- in the security group can communicate with one another.
- type: complex
- returned: success
- contains:
- description:
- description:
- - Provides supplementary information about the security
- group rule.
- type: str
- returned: success
- direction:
- description:
- - Specifies the direction of access control. The value can
- be egress or ingress.
- type: str
- returned: success
- ethertype:
- description:
- - Specifies the IP protocol version. The value can be IPv4
- or IPv6.
- type: str
- returned: success
- id:
- description:
- - Specifies the security group rule ID.
- type: str
- returned: success
- port_range_max:
- description:
- - Specifies the end port number. The value ranges from 1 to
- 65535. If the protocol is not icmp, the value cannot be
- smaller than the port_range_min value. An empty value
- indicates all ports.
- type: int
- returned: success
- port_range_min:
- description:
- - Specifies the start port number. The value ranges from 1
- to 65535. The value cannot be greater than the
- port_range_max value. An empty value indicates all ports.
- type: int
- returned: success
- protocol:
- description:
- - Specifies the protocol type. The value can be icmp, tcp,
- udp, or others. If the parameter is left blank, the
- security group supports all protocols.
- type: str
- returned: success
- remote_address_group_id:
- description:
- - Specifies the ID of remote IP address group.
- type: str
- returned: success
- remote_group_id:
- description:
- - Specifies the ID of the peer security group.
- type: str
- returned: success
- remote_ip_prefix:
- description:
- - Specifies the remote IP address. If the access control
- direction is set to egress, the parameter specifies the
- source IP address. If the access control direction is set
- to ingress, the parameter specifies the destination IP
- address.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- name=dict(type='str', required=True),
- enterprise_project_id=dict(type='str'),
- vpc_id=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params.get("id"):
- resource = read_resource(config)
- if module.params['state'] == 'present':
- check_resource_option(resource, module)
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = update_properties(module, {"read": v[0]}, None)
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- resource = create(config)
- changed = True
-
- result = resource
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "enterprise_project_id": module.params.get("enterprise_project_id"),
- "name": module.params.get("name"),
- "vpc_id": module.params.get("vpc_id"),
- "id": module.params.get("id"),
- }
-
-
-def check_resource_option(resource, module):
- opts = user_input_parameters(module)
-
- resource = {
- "enterprise_project_id": resource.get("enterprise_project_id"),
- "name": resource.get("name"),
- "vpc_id": resource.get("vpc_id"),
- "id": resource.get("id"),
- }
-
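- # The VPC API offers no update call for these attributes, so any drift
- # between the requested options and the existing group is reported as an
- # error rather than reconciled in place.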
- if are_different_dicts(resource, opts):
- raise Exception(
- "Cannot change option from (%s) to (%s) for an"
- " existing security group(%s)." % (resource, opts,
- module.params.get('id')))
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
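- # Remember the id returned by the create call; later requests expand
- # "security-groups/{id}" from module.params via build_path().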
- module.params['id'] = navigate_value(r, ["security_group", "id"])
-
- result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
- return result
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- send_delete_request(module, None, client)
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_params = []
-
- v = navigate_value(opts, ["enterprise_project_id"])
- if v:
- query_params.append("enterprise_project_id=" + str(v))
-
- v = navigate_value(opts, ["vpc_id"])
- if v:
- query_params.append("vpc_id=" + str(v))
-
- query_link = "?marker={marker}&limit=10"
- if query_params:
- query_link += "&" + "&".join(query_params)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "security-groups" + query_link
-
- result = []
- p = {'marker': ''}
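- # Page through the list API with marker-based pagination: each request
- # returns at most 10 items and the next page starts after the id of the
- # last item seen. The scan stops early once more than one match is found,
- # since multiple matches already abort the module run.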
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["enterprise_project_id"], None)
- if not is_empty_value(v):
- params["enterprise_project_id"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = navigate_value(opts, ["vpc_id"], None)
- if not is_empty_value(v):
- params["vpc_id"] = v
-
- if not params:
- return params
-
- params = {"security_group": params}
-
- return params
-
-
-def send_create_request(module, params, client):
- url = "security-groups"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "security-groups/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "security-groups/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["security_group"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
- result["security_group_rules"] = v
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-def fill_read_resp_security_group_rules(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["description"] = item.get("description")
-
- val["direction"] = item.get("direction")
-
- val["ethertype"] = item.get("ethertype")
-
- val["id"] = item.get("id")
-
- val["port_range_max"] = item.get("port_range_max")
-
- val["port_range_min"] = item.get("port_range_min")
-
- val["protocol"] = item.get("protocol")
-
- val["remote_address_group_id"] = item.get("remote_address_group_id")
-
- val["remote_group_id"] = item.get("remote_group_id")
-
- val["remote_ip_prefix"] = item.get("remote_ip_prefix")
-
- val["security_group_id"] = item.get("security_group_id")
-
- result.append(val)
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "enterprise_project_id"],
- array_index)
- r["enterprise_project_id"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- if not exclude_output:
- v = r.get("rules")
- v = flatten_rules(response, array_index, v, exclude_output)
- r["rules"] = v
-
- v = navigate_value(response, ["read", "vpc_id"], array_index)
- r["vpc_id"] = v
-
- v = navigate_value(response, ["read", "id"], array_index)
- r["id"] = v
-
- return r
-
-
-def flatten_rules(d, array_index, current_value, exclude_output):
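- # Merge the rule list from the read response into the result: walk the
- # indexed entries under "read.security_group_rules", copy each field into
- # the matching slot of the existing value, and only append a new entry
- # when it carries at least one non-None field.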
- n = 0
- result = current_value
- has_init_value = True
- if result:
- n = len(result)
- else:
- has_init_value = False
- result = []
- v = navigate_value(d, ["read", "security_group_rules"],
- array_index)
- if not v:
- return current_value
- n = len(v)
-
- new_array_index = dict()
- if array_index:
- new_array_index.update(array_index)
-
- for i in range(n):
- new_array_index["read.security_group_rules"] = i
-
- val = dict()
- if len(result) >= (i + 1) and result[i]:
- val = result[i]
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "description"],
- new_array_index)
- val["description"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "direction"],
- new_array_index)
- val["direction"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
- new_array_index)
- val["ethertype"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "id"],
- new_array_index)
- val["id"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
- new_array_index)
- val["port_range_max"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
- new_array_index)
- val["port_range_min"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "protocol"],
- new_array_index)
- val["protocol"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
- new_array_index)
- val["remote_address_group_id"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
- new_array_index)
- val["remote_group_id"] = v
-
- if not exclude_output:
- v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
- new_array_index)
- val["remote_ip_prefix"] = v
-
- if len(result) >= (i + 1):
- result[i] = val
- else:
- for v in val.values():
- if v is not None:
- result.append(val)
- break
-
- return result if (has_init_value or result) else current_value
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["security_groups"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = navigate_value(all_opts, ["enterprise_project_id"], None)
- result["enterprise_project_id"] = v
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["name"], None)
- result["name"] = v
-
- result["security_group_rules"] = None
-
- v = navigate_value(all_opts, ["vpc_id"], None)
- result["vpc_id"] = v
-
- return result
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["enterprise_project_id"] = body.get("enterprise_project_id")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
- result["security_group_rules"] = v
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-def fill_list_resp_security_group_rules(value):
- if not value:
- return None
-
- result = []
- for item in value:
- val = dict()
-
- val["description"] = item.get("description")
-
- val["direction"] = item.get("direction")
-
- val["ethertype"] = item.get("ethertype")
-
- val["id"] = item.get("id")
-
- val["port_range_max"] = item.get("port_range_max")
-
- val["port_range_min"] = item.get("port_range_min")
-
- val["protocol"] = item.get("protocol")
-
- val["remote_address_group_id"] = item.get("remote_address_group_id")
-
- val["remote_group_id"] = item.get("remote_group_id")
-
- val["remote_ip_prefix"] = item.get("remote_ip_prefix")
-
- val["security_group_id"] = item.get("security_group_id")
-
- result.append(val)
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
deleted file mode 100644
index f92c8276..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_security_group_rule.py
+++ /dev/null
@@ -1,570 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_security_group_rule
-description:
- - vpc security group rule management.
-short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
-notes:
- - If I(id) option is provided, it takes precedence over
- I(security_group_id) for security group rule selection.
- - I(security_group_id) is used for security group rule selection. If more
- than one security group rule with this option exists, execution is
- aborted.
- - No parameter supports updating. If any option is changed, the module
- will create a new resource instead.
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- direction:
- description:
- - Specifies the direction of access control. The value can be
- egress or ingress.
- type: str
- required: true
- security_group_id:
- description:
- - Specifies the security group ID, which uniquely identifies the
- security group to which the rule belongs.
- type: str
- required: true
- description:
- description:
- - Provides supplementary information about the security group rule.
- The value is a string of no more than 255 characters that can
- contain letters and digits.
- type: str
- required: false
- ethertype:
- description:
- - Specifies the IP protocol version. The value can be IPv4 or IPv6.
- If you do not set this parameter, IPv4 is used by default.
- type: str
- required: false
- port_range_max:
- description:
- - Specifies the end port number. The value ranges from 1 to 65535.
- If the protocol is not icmp, the value cannot be smaller than the
- port_range_min value. An empty value indicates all ports.
- type: int
- required: false
- port_range_min:
- description:
- - Specifies the start port number. The value ranges from 1 to
- 65535. The value cannot be greater than the port_range_max value.
- An empty value indicates all ports.
- type: int
- required: false
- protocol:
- description:
- - Specifies the protocol type. The value can be icmp, tcp, or udp.
- If the parameter is left blank, the security group supports all
- protocols.
- type: str
- required: false
- remote_group_id:
- description:
- - Specifies the ID of the peer security group. The value is
- exclusive with parameter remote_ip_prefix.
- type: str
- required: false
- remote_ip_prefix:
- description:
- - Specifies the remote IP address. If the access control direction
- is set to egress, the parameter specifies the source IP address.
- If the access control direction is set to ingress, the parameter
- specifies the destination IP address. The value can be in the
- CIDR format or IP addresses. The parameter is exclusive with
- parameter remote_group_id.
- type: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create a security group rule
-- name: Create a security group
- community.general.hwc_vpc_security_group:
- name: "ansible_network_security_group_test"
- register: sg
-- name: Create a security group rule
- community.general.hwc_vpc_security_group_rule:
- direction: "ingress"
- protocol: "tcp"
- ethertype: "IPv4"
- port_range_max: 22
- security_group_id: "{{ sg.id }}"
- port_range_min: 22
- remote_ip_prefix: "0.0.0.0/0"
-'''
-
-RETURN = '''
- direction:
- description:
- - Specifies the direction of access control. The value can be
- egress or ingress.
- type: str
- returned: success
- security_group_id:
- description:
- - Specifies the security group ID, which uniquely identifies the
- security group to which the rule belongs.
- type: str
- returned: success
- description:
- description:
- - Provides supplementary information about the security group rule.
- The value is a string of no more than 255 characters that can
- contain letters and digits.
- type: str
- returned: success
- ethertype:
- description:
- - Specifies the IP protocol version. The value can be IPv4 or IPv6.
- If you do not set this parameter, IPv4 is used by default.
- type: str
- returned: success
- port_range_max:
- description:
- - Specifies the end port number. The value ranges from 1 to 65535.
- If the protocol is not icmp, the value cannot be smaller than the
- port_range_min value. An empty value indicates all ports.
- type: int
- returned: success
- port_range_min:
- description:
- - Specifies the start port number. The value ranges from 1 to
- 65535. The value cannot be greater than the port_range_max value.
- An empty value indicates all ports.
- type: int
- returned: success
- protocol:
- description:
- - Specifies the protocol type. The value can be icmp, tcp, or udp.
- If the parameter is left blank, the security group supports all
- protocols.
- type: str
- returned: success
- remote_group_id:
- description:
- - Specifies the ID of the peer security group. The value is
- exclusive with parameter remote_ip_prefix.
- type: str
- returned: success
- remote_ip_prefix:
- description:
- - Specifies the remote IP address. If the access control direction
- is set to egress, the parameter specifies the source IP address.
- If the access control direction is set to ingress, the parameter
- specifies the destination IP address. The value can be in the
- CIDR format or IP addresses. The parameter is exclusive with
- parameter remote_group_id.
- type: str
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcModule, are_different_dicts, build_path,
- get_region, is_empty_value, navigate_value)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- direction=dict(type='str', required=True),
- security_group_id=dict(type='str', required=True),
- description=dict(type='str'),
- ethertype=dict(type='str'),
- port_range_max=dict(type='int'),
- port_range_min=dict(type='int'),
- protocol=dict(type='str'),
- remote_group_id=dict(type='str'),
- remote_ip_prefix=dict(type='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params['id']:
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
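- # Rules cannot be updated in place, so after locating (or creating) the
- # rule, re-read the remote state and fail on any mismatch with the
- # requested options instead of attempting an update.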
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- raise Exception(
- "Cannot change option from (%s) to (%s) for an"
- " existing security group(%s)." % (current, expect, module.params.get('id')))
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "description": module.params.get("description"),
- "direction": module.params.get("direction"),
- "ethertype": module.params.get("ethertype"),
- "port_range_max": module.params.get("port_range_max"),
- "port_range_min": module.params.get("port_range_min"),
- "protocol": module.params.get("protocol"),
- "remote_group_id": module.params.get("remote_group_id"),
- "remote_ip_prefix": module.params.get("remote_ip_prefix"),
- "security_group_id": module.params.get("security_group_id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- send_delete_request(module, None, client)
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_link = "?marker={marker}&limit=10"
- v = navigate_value(opts, ["security_group_id"])
- if v:
- query_link += "&security_group_id=" + str(v)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "security-group-rules" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["description"], None)
- if not is_empty_value(v):
- params["description"] = v
-
- v = navigate_value(opts, ["direction"], None)
- if not is_empty_value(v):
- params["direction"] = v
-
- v = navigate_value(opts, ["ethertype"], None)
- if not is_empty_value(v):
- params["ethertype"] = v
-
- v = navigate_value(opts, ["port_range_max"], None)
- if not is_empty_value(v):
- params["port_range_max"] = v
-
- v = navigate_value(opts, ["port_range_min"], None)
- if not is_empty_value(v):
- params["port_range_min"] = v
-
- v = navigate_value(opts, ["protocol"], None)
- if not is_empty_value(v):
- params["protocol"] = v
-
- v = navigate_value(opts, ["remote_group_id"], None)
- if not is_empty_value(v):
- params["remote_group_id"] = v
-
- v = navigate_value(opts, ["remote_ip_prefix"], None)
- if not is_empty_value(v):
- params["remote_ip_prefix"] = v
-
- v = navigate_value(opts, ["security_group_id"], None)
- if not is_empty_value(v):
- params["security_group_id"] = v
-
- if not params:
- return params
-
- params = {"security_group_rule": params}
-
- return params
-
-
-def send_create_request(module, params, client):
- url = "security-group-rules"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group_rule): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "security-group-rules/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group_rule): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "security-group-rules/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group_rule): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["security_group_rule"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["description"] = body.get("description")
-
- result["direction"] = body.get("direction")
-
- result["ethertype"] = body.get("ethertype")
-
- result["id"] = body.get("id")
-
- result["port_range_max"] = body.get("port_range_max")
-
- result["port_range_min"] = body.get("port_range_min")
-
- result["protocol"] = body.get("protocol")
-
- result["remote_address_group_id"] = body.get("remote_address_group_id")
-
- result["remote_group_id"] = body.get("remote_group_id")
-
- result["remote_ip_prefix"] = body.get("remote_ip_prefix")
-
- result["security_group_id"] = body.get("security_group_id")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "description"], array_index)
- r["description"] = v
-
- v = navigate_value(response, ["read", "direction"], array_index)
- r["direction"] = v
-
- v = navigate_value(response, ["read", "ethertype"], array_index)
- r["ethertype"] = v
-
- v = navigate_value(response, ["read", "port_range_max"], array_index)
- r["port_range_max"] = v
-
- v = navigate_value(response, ["read", "port_range_min"], array_index)
- r["port_range_min"] = v
-
- v = navigate_value(response, ["read", "protocol"], array_index)
- r["protocol"] = v
-
- v = navigate_value(response, ["read", "remote_group_id"], array_index)
- r["remote_group_id"] = v
-
- v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
- r["remote_ip_prefix"] = v
-
- v = navigate_value(response, ["read", "security_group_id"], array_index)
- r["security_group_id"] = v
-
- return r
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_security_group_rule): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["security_group_rules"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = navigate_value(all_opts, ["description"], None)
- result["description"] = v
-
- v = navigate_value(all_opts, ["direction"], None)
- result["direction"] = v
-
- v = navigate_value(all_opts, ["ethertype"], None)
- result["ethertype"] = v
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["port_range_max"], None)
- result["port_range_max"] = v
-
- v = navigate_value(all_opts, ["port_range_min"], None)
- result["port_range_min"] = v
-
- v = navigate_value(all_opts, ["protocol"], None)
- result["protocol"] = v
-
- result["remote_address_group_id"] = None
-
- v = navigate_value(all_opts, ["remote_group_id"], None)
- result["remote_group_id"] = v
-
- v = navigate_value(all_opts, ["remote_ip_prefix"], None)
- result["remote_ip_prefix"] = v
-
- v = navigate_value(all_opts, ["security_group_id"], None)
- result["security_group_id"] = v
-
- return result
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["description"] = body.get("description")
-
- result["direction"] = body.get("direction")
-
- result["ethertype"] = body.get("ethertype")
-
- result["id"] = body.get("id")
-
- result["port_range_max"] = body.get("port_range_max")
-
- result["port_range_min"] = body.get("port_range_min")
-
- result["protocol"] = body.get("protocol")
-
- result["remote_address_group_id"] = body.get("remote_address_group_id")
-
- result["remote_group_id"] = body.get("remote_group_id")
-
- result["remote_ip_prefix"] = body.get("remote_ip_prefix")
-
- result["security_group_id"] = body.get("security_group_id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
deleted file mode 100644
index ccf18050..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/huawei/hwc_vpc_subnet.py
+++ /dev/null
@@ -1,734 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2019 Huawei
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-###############################################################################
-# Documentation
-###############################################################################
-
-DOCUMENTATION = '''
----
-module: hwc_vpc_subnet
-description:
- - subnet management.
-short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
-version_added: '0.2.0'
-author: Huawei Inc. (@huaweicloud)
-requirements:
- - keystoneauth1 >= 3.6.0
-options:
- state:
- description:
- - Whether the given object should exist in Huawei Cloud.
- type: str
- choices: ['present', 'absent']
- default: 'present'
- timeouts:
- description:
- - The timeouts for each operation.
- type: dict
- suboptions:
- create:
- description:
- - The timeout for the create operation.
- type: str
- default: '15m'
- update:
- description:
- - The timeout for the update operation.
- type: str
- default: '15m'
- cidr:
- description:
- - Specifies the subnet CIDR block. The value must be within the VPC
- CIDR block and be in CIDR format. The subnet mask cannot be
- greater than 28. Cannot be changed after creating the subnet.
- type: str
- required: true
- gateway_ip:
- description:
- - Specifies the gateway of the subnet. The value must be an IP
- address in the subnet. Cannot be changed after creating the subnet.
- type: str
- required: true
- name:
- description:
- - Specifies the subnet name. The value is a string of 1 to 64
- characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
- type: str
- required: true
- vpc_id:
- description:
- - Specifies the ID of the VPC to which the subnet belongs. Cannot
- be changed after creating the subnet.
- type: str
- required: true
- availability_zone:
- description:
- - Specifies the AZ to which the subnet belongs. Cannot be changed
- after creating the subnet.
- type: str
- required: false
- dhcp_enable:
- description:
- - Specifies whether DHCP is enabled for the subnet. The value can
- be true (enabled) or false (disabled); the default value is true.
- If this parameter is set to false, newly created ECSs cannot
- obtain IP addresses, and usernames and passwords cannot be
- injected using Cloud-init.
- type: bool
- required: false
- dns_address:
- description:
- - Specifies the DNS server addresses for the subnet. The address
- at the head of the list is used first.
- type: list
- elements: str
- required: false
-extends_documentation_fragment:
-- community.general.hwc
-
-'''
-
-EXAMPLES = '''
-# create subnet
-- name: Create vpc
- community.general.hwc_network_vpc:
- cidr: "192.168.100.0/24"
- name: "ansible_network_vpc_test"
- register: vpc
-- name: Create subnet
- community.general.hwc_vpc_subnet:
- vpc_id: "{{ vpc.id }}"
- cidr: "192.168.100.0/26"
- gateway_ip: "192.168.100.32"
- name: "ansible_network_subnet_test"
- dhcp_enable: True
-'''
-
-RETURN = '''
- cidr:
- description:
- - Specifies the subnet CIDR block. The value must be within the VPC
- CIDR block and be in CIDR format. The subnet mask cannot be
- greater than 28.
- type: str
- returned: success
- gateway_ip:
- description:
- - Specifies the gateway of the subnet. The value must be an IP
- address in the subnet.
- type: str
- returned: success
- name:
- description:
- - Specifies the subnet name. The value is a string of 1 to 64
- characters that can contain letters, digits, underscores C(_),
- hyphens (-), and periods (.).
- type: str
- returned: success
- vpc_id:
- description:
- - Specifies the ID of the VPC to which the subnet belongs.
- type: str
- returned: success
- availability_zone:
- description:
- - Specifies the AZ to which the subnet belongs.
- type: str
- returned: success
- dhcp_enable:
- description:
- - Specifies whether DHCP is enabled for the subnet. The value can
- be true (enabled) or false (disabled); the default value is true.
- If this parameter is set to false, newly created ECSs cannot
- obtain IP addresses, and usernames and passwords cannot be
- injected using Cloud-init.
- type: bool
- returned: success
- dns_address:
- description:
- - Specifies the DNS server addresses for the subnet. The address
- at the head of the list is used first.
- type: list
- returned: success
-'''
-
-from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
- Config, HwcClientException, HwcClientException404, HwcModule,
- are_different_dicts, build_path, get_region, is_empty_value,
- navigate_value, wait_to_finish)
-
-
-def build_module():
- return HwcModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'],
- type='str'),
- timeouts=dict(type='dict', options=dict(
- create=dict(default='15m', type='str'),
- update=dict(default='15m', type='str'),
- ), default=dict()),
- cidr=dict(type='str', required=True),
- gateway_ip=dict(type='str', required=True),
- name=dict(type='str', required=True),
- vpc_id=dict(type='str', required=True),
- availability_zone=dict(type='str'),
- dhcp_enable=dict(type='bool'),
- dns_address=dict(type='list', elements='str')
- ),
- supports_check_mode=True,
- )
-
-
-def main():
- """Main function"""
-
- module = build_module()
- config = Config(module, "vpc")
-
- try:
- resource = None
- if module.params.get('id'):
- resource = True
- else:
- v = search_resource(config)
- if len(v) > 1:
- raise Exception("Found more than one resource(%s)" % ", ".join([
- navigate_value(i, ["id"]) for i in v]))
-
- if len(v) == 1:
- resource = v[0]
- module.params['id'] = navigate_value(resource, ["id"])
-
- result = {}
- changed = False
- if module.params['state'] == 'present':
- if resource is None:
- if not module.check_mode:
- create(config)
- changed = True
-
- current = read_resource(config, exclude_output=True)
- expect = user_input_parameters(module)
- if are_different_dicts(expect, current):
- if not module.check_mode:
- update(config)
- changed = True
-
- result = read_resource(config)
- result['id'] = module.params.get('id')
- else:
- if resource:
- if not module.check_mode:
- delete(config)
- changed = True
-
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- else:
- result['changed'] = changed
- module.exit_json(**result)
-
-
-def user_input_parameters(module):
- return {
- "availability_zone": module.params.get("availability_zone"),
- "cidr": module.params.get("cidr"),
- "dhcp_enable": module.params.get("dhcp_enable"),
- "dns_address": module.params.get("dns_address"),
- "gateway_ip": module.params.get("gateway_ip"),
- "name": module.params.get("name"),
- "vpc_id": module.params.get("vpc_id"),
- }
-
-
-def create(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
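- # Timeouts are strings such as '15m'; strip the unit and convert the
- # minutes to seconds for the polling helpers below.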
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_create_parameters(opts)
- r = send_create_request(module, params, client)
- obj = async_wait_create(config, r, client, timeout)
- module.params['id'] = navigate_value(obj, ["subnet", "id"])
-
-
-def update(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
- opts = user_input_parameters(module)
-
- params = build_update_parameters(opts)
- if params:
- r = send_update_request(module, params, client)
- async_wait_update(config, r, client, timeout)
-
-
-def delete(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- send_delete_request(module, None, client)
-
- url = build_path(module, "subnets/{id}")
-
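- # Deletion is asynchronous: keep polling the read endpoint until it
- # answers 404, which signals that the subnet is actually gone.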
- def _refresh_status():
- try:
- client.get(url)
- except HwcClientException404:
- return True, "Done"
-
- except Exception:
- return None, ""
-
- return True, "Pending"
-
- timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
- try:
- wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_subnet): error "
- "waiting for api(delete) to "
- "be done, error= %s" % str(ex))
-
-
-def read_resource(config, exclude_output=False):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
-
- res = {}
-
- r = send_read_request(module, client)
- res["read"] = fill_read_resp_body(r)
-
- return update_properties(module, res, None, exclude_output)
-
-
-def _build_query_link(opts):
- query_link = "?marker={marker}&limit=10"
- v = navigate_value(opts, ["vpc_id"])
- if v:
- query_link += "&vpc_id=" + str(v)
-
- return query_link
-
-
-def search_resource(config):
- module = config.module
- client = config.client(get_region(module), "vpc", "project")
- opts = user_input_parameters(module)
- identity_obj = _build_identity_object(opts)
- query_link = _build_query_link(opts)
- link = "subnets" + query_link
-
- result = []
- p = {'marker': ''}
- while True:
- url = link.format(**p)
- r = send_list_request(module, client, url)
- if not r:
- break
-
- for item in r:
- item = fill_list_resp_body(item)
- if not are_different_dicts(identity_obj, item):
- result.append(item)
-
- if len(result) > 1:
- break
-
- p['marker'] = r[-1].get('id')
-
- return result
-
-
-def build_create_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["availability_zone"], None)
- if not is_empty_value(v):
- params["availability_zone"] = v
-
- v = navigate_value(opts, ["cidr"], None)
- if not is_empty_value(v):
- params["cidr"] = v
-
- v = navigate_value(opts, ["dhcp_enable"], None)
- if v is not None:
- params["dhcp_enable"] = v
-
- v = expand_create_dns_list(opts, None)
- if not is_empty_value(v):
- params["dnsList"] = v
-
- v = navigate_value(opts, ["gateway_ip"], None)
- if not is_empty_value(v):
- params["gateway_ip"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = expand_create_primary_dns(opts, None)
- if not is_empty_value(v):
- params["primary_dns"] = v
-
- v = expand_create_secondary_dns(opts, None)
- if not is_empty_value(v):
- params["secondary_dns"] = v
-
- v = navigate_value(opts, ["vpc_id"], None)
- if not is_empty_value(v):
- params["vpc_id"] = v
-
- if not params:
- return params
-
- params = {"subnet": params}
-
- return params
-
-
-def expand_create_dns_list(d, array_index):
- v = navigate_value(d, ["dns_address"], array_index)
- return v if (v and len(v) > 2) else []
-
-
-def expand_create_primary_dns(d, array_index):
- v = navigate_value(d, ["dns_address"], array_index)
- return v[0] if v else ""
-
-
-def expand_create_secondary_dns(d, array_index):
- v = navigate_value(d, ["dns_address"], array_index)
- return v[1] if (v and len(v) > 1) else ""
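-
-# Taken together, the three expand_create_* helpers implement the DNS
-# convention of the subnet API: the first address becomes primary_dns, the
-# second secondary_dns, and the raw list is only sent as dnsList when more
-# than two addresses are given. For example (hypothetical values), a
-# dns_address of ["100.125.1.250", "100.125.21.250", "8.8.8.8"] fills all
-# three fields, while a single address fills primary_dns only.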
-
-
-def send_create_request(module, params, client):
- url = "subnets"
- try:
- r = client.post(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_subnet): error running "
- "api(create), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_create(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "subnet_id": ["subnet", "id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "subnets/{subnet_id}", data)
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["subnet", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE"],
- ["UNKNOWN"],
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_subnet): error "
- "waiting for api(create) to "
- "be done, error= %s" % str(ex))
-
-
-def build_update_parameters(opts):
- params = dict()
-
- v = navigate_value(opts, ["dhcp_enable"], None)
- if v is not None:
- params["dhcp_enable"] = v
-
- v = expand_update_dns_list(opts, None)
- if v is not None:
- params["dnsList"] = v
-
- v = navigate_value(opts, ["name"], None)
- if not is_empty_value(v):
- params["name"] = v
-
- v = expand_update_primary_dns(opts, None)
- if v is not None:
- params["primary_dns"] = v
-
- v = expand_update_secondary_dns(opts, None)
- if v is not None:
- params["secondary_dns"] = v
-
- if not params:
- return params
-
- params = {"subnet": params}
-
- return params
-
-
-def expand_update_dns_list(d, array_index):
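- # Return the full list (sent as dnsList) when more than two addresses are
- # given, None (omit the field) when one or two addresses are covered by
- # primary/secondary, and [] (clear the server-side list) when none remain.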
- v = navigate_value(d, ["dns_address"], array_index)
- if v:
- if len(v) > 2:
- return v
- return None
- return []
-
-
-def expand_update_primary_dns(d, array_index):
- v = navigate_value(d, ["dns_address"], array_index)
- return v[0] if v else ""
-
-
-def expand_update_secondary_dns(d, array_index):
- v = navigate_value(d, ["dns_address"], array_index)
- return v[1] if (v and len(v) > 1) else ""
-
-
-def send_update_request(module, params, client):
- url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
-
- try:
- r = client.put(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_subnet): error running "
- "api(update), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def async_wait_update(config, result, client, timeout):
- module = config.module
-
- path_parameters = {
- "subnet_id": ["subnet", "id"],
- }
- data = dict((key, navigate_value(result, path))
- for key, path in path_parameters.items())
-
- url = build_path(module, "subnets/{subnet_id}", data)
-
- def _query_status():
- r = None
- try:
- r = client.get(url, timeout=timeout)
- except HwcClientException:
- return None, ""
-
- try:
- s = navigate_value(r, ["subnet", "status"])
- return r, s
- except Exception:
- return None, ""
-
- try:
- return wait_to_finish(
- ["ACTIVE"],
- ["UNKNOWN"],
- _query_status, timeout)
- except Exception as ex:
- module.fail_json(msg="module(hwc_vpc_subnet): error "
- "waiting for api(update) to "
- "be done, error= %s" % str(ex))
-
-
-def send_delete_request(module, params, client):
- url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
-
- try:
- r = client.delete(url, params)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_subnet): error running "
- "api(delete), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return r
-
-
-def send_read_request(module, client):
- url = build_path(module, "subnets/{id}")
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_subnet): error running "
- "api(read), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["subnet"], None)
-
-
-def fill_read_resp_body(body):
- result = dict()
-
- result["availability_zone"] = body.get("availability_zone")
-
- result["cidr"] = body.get("cidr")
-
- result["dhcp_enable"] = body.get("dhcp_enable")
-
- result["dnsList"] = body.get("dnsList")
-
- result["gateway_ip"] = body.get("gateway_ip")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- result["neutron_network_id"] = body.get("neutron_network_id")
-
- result["neutron_subnet_id"] = body.get("neutron_subnet_id")
-
- result["primary_dns"] = body.get("primary_dns")
-
- result["secondary_dns"] = body.get("secondary_dns")
-
- result["status"] = body.get("status")
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-def update_properties(module, response, array_index, exclude_output=False):
- r = user_input_parameters(module)
-
- v = navigate_value(response, ["read", "availability_zone"], array_index)
- r["availability_zone"] = v
-
- v = navigate_value(response, ["read", "cidr"], array_index)
- r["cidr"] = v
-
- v = navigate_value(response, ["read", "dhcp_enable"], array_index)
- r["dhcp_enable"] = v
-
- v = navigate_value(response, ["read", "dnsList"], array_index)
- r["dns_address"] = v
-
- v = navigate_value(response, ["read", "gateway_ip"], array_index)
- r["gateway_ip"] = v
-
- v = navigate_value(response, ["read", "name"], array_index)
- r["name"] = v
-
- v = navigate_value(response, ["read", "vpc_id"], array_index)
- r["vpc_id"] = v
-
- return r
-
-
-def send_list_request(module, client, url):
-
- r = None
- try:
- r = client.get(url)
- except HwcClientException as ex:
- msg = ("module(hwc_vpc_subnet): error running "
- "api(list), error: %s" % str(ex))
- module.fail_json(msg=msg)
-
- return navigate_value(r, ["subnets"], None)
-
-
-def _build_identity_object(all_opts):
- result = dict()
-
- v = navigate_value(all_opts, ["availability_zone"], None)
- result["availability_zone"] = v
-
- v = navigate_value(all_opts, ["cidr"], None)
- result["cidr"] = v
-
- v = navigate_value(all_opts, ["dhcp_enable"], None)
- result["dhcp_enable"] = v
-
- v = navigate_value(all_opts, ["dns_address"], None)
- result["dnsList"] = v
-
- v = navigate_value(all_opts, ["gateway_ip"], None)
- result["gateway_ip"] = v
-
- result["id"] = None
-
- v = navigate_value(all_opts, ["name"], None)
- result["name"] = v
-
- result["neutron_network_id"] = None
-
- result["neutron_subnet_id"] = None
-
- result["primary_dns"] = None
-
- result["secondary_dns"] = None
-
- result["status"] = None
-
- v = navigate_value(all_opts, ["vpc_id"], None)
- result["vpc_id"] = v
-
- return result
-
-
-def fill_list_resp_body(body):
- result = dict()
-
- result["availability_zone"] = body.get("availability_zone")
-
- result["cidr"] = body.get("cidr")
-
- result["dhcp_enable"] = body.get("dhcp_enable")
-
- result["dnsList"] = body.get("dnsList")
-
- result["gateway_ip"] = body.get("gateway_ip")
-
- result["id"] = body.get("id")
-
- result["name"] = body.get("name")
-
- result["neutron_network_id"] = body.get("neutron_network_id")
-
- result["neutron_subnet_id"] = body.get("neutron_subnet_id")
-
- result["primary_dns"] = body.get("primary_dns")
-
- result["secondary_dns"] = body.get("secondary_dns")
-
- result["status"] = body.get("status")
-
- result["vpc_id"] = body.get("vpc_id")
-
- return result
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py b/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
deleted file mode 100644
index c627fb70..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/linode/linode.py
+++ /dev/null
@@ -1,688 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: linode
-short_description: Manage instances on the Linode Public Cloud
-description:
- - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
-options:
- state:
- description:
- - Indicate desired state of the resource
- choices: [ absent, active, deleted, present, restarted, started, stopped ]
- default: present
- type: str
- api_key:
- description:
- - Linode API key.
- - C(LINODE_API_KEY) env variable can be used instead.
- type: str
- required: yes
- name:
- description:
- - Name to give the instance (alphanumeric, dashes, underscore).
- - To keep the Linode Web Console tidy, the name is prefixed with C(LinodeID-).
- required: true
- type: str
- displaygroup:
- description:
- - Add the instance to a Display Group in Linode Manager.
- type: str
- linode_id:
- description:
- - Unique ID of a Linode server. This value is read-only in the sense that
- if you specify it on creation of a Linode it will not be used. The
- Linode API generates these IDs, and we can use those generated values here
- to reference a Linode more specifically. This is useful for idempotence.
- aliases: [ lid ]
- type: int
- additional_disks:
- description:
- - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
- - Dictionary takes Size, Label, Type. Size is in MB.
- type: list
- elements: dict
- alert_bwin_enabled:
- description:
- - Set status of bandwidth-in alerts.
- type: bool
- alert_bwin_threshold:
- description:
- - Set threshold in MB for bandwidth-in alerts.
- type: int
- alert_bwout_enabled:
- description:
- - Set status of bandwidth-out alerts.
- type: bool
- alert_bwout_threshold:
- description:
- - Set threshold in MB for bandwidth-out alerts.
- type: int
- alert_bwquota_enabled:
- description:
- - Set status of bandwidth quota alerts as percentage of network transfer quota.
- type: bool
- alert_bwquota_threshold:
- description:
- - Set threshold for bandwidth quota alerts as percentage of network transfer quota.
- type: int
- alert_cpu_enabled:
- description:
- - Set status of receiving CPU usage alerts.
- type: bool
- alert_cpu_threshold:
- description:
- - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
- type: int
- alert_diskio_enabled:
- description:
- - Set status of receiving disk IO alerts.
- type: bool
- alert_diskio_threshold:
- description:
- - Set threshold for average IO ops/sec over a 2-hour period.
- type: int
- backupsenabled:
- description:
- - Deprecated parameter; it will be removed in community.general C(5.0.0).
- - To enable backups pass values to either I(backupweeklyday) or I(backupwindow).
- type: int
- backupweeklyday:
- description:
- - Day of the week to take backups.
- type: int
- backupwindow:
- description:
- - The time window in which backups will be taken.
- type: int
- plan:
- description:
- - plan to use for the instance (Linode plan)
- type: int
- payment_term:
- description:
- - payment term to use for the instance (payment term in months)
- default: 1
- choices: [ 1, 12, 24 ]
- type: int
- password:
- description:
- - root password to apply to a new server (auto generated if missing)
- type: str
- private_ip:
- description:
- - Add private IPv4 address when Linode is created.
- - Default is C(false).
- type: bool
- ssh_pub_key:
- description:
- - SSH public key applied to root user
- type: str
- swap:
- description:
- - swap size in MB
- default: 512
- type: int
- distribution:
- description:
- - distribution to use for the instance (Linode Distribution)
- type: int
- datacenter:
- description:
- - datacenter to create an instance in (Linode Datacenter)
- type: int
- kernel_id:
- description:
- - kernel to use for the instance (Linode Kernel)
- type: int
- wait:
- description:
- - wait for the instance to be in state C(running) before returning
- type: bool
- default: true
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- default: 300
- type: int
- watchdog:
- description:
- - Set status of Lassie watchdog.
- type: bool
- default: "True"
-requirements:
- - python >= 2.6
- - linode-python
-author:
-- Vincent Viallet (@zbal)
-notes:
- - Please note, linode-python does not have python 3 support.
- - This module uses the now deprecated v3 of the Linode API.
- - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
-'''
-
-EXAMPLES = '''
-
-- name: Create a new Linode
- community.general.linode:
- name: linode-test1
- plan: 1
- datacenter: 7
- distribution: 129
- state: present
- register: linode_creation
-
-- name: Create a server with a private IP Address
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 1
- datacenter: 2
- distribution: 99
- password: 'superSecureRootPassword'
- private_ip: yes
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: yes
- wait_timeout: 600
- state: present
- delegate_to: localhost
- register: linode_creation
-
-- name: Fully configure new server
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 4
- datacenter: 2
- distribution: 99
- kernel_id: 138
- password: 'superSecureRootPassword'
- private_ip: yes
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: yes
- wait_timeout: 600
- state: present
- alert_bwquota_enabled: True
- alert_bwquota_threshold: 80
- alert_bwin_enabled: True
- alert_bwin_threshold: 10
- alert_cpu_enabled: True
- alert_cpu_threshold: 210
- alert_bwout_enabled: True
- alert_bwout_threshold: 10
- alert_diskio_enabled: True
- alert_diskio_threshold: 10000
- backupweeklyday: 1
- backupwindow: 2
- displaygroup: 'test'
- additional_disks:
- - {Label: 'disk1', Size: 2500, Type: 'raw'}
- - {Label: 'newdisk', Size: 2000}
- watchdog: True
- delegate_to: localhost
- register: linode_creation
-
-- name: Ensure a running server (create if missing)
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- plan: 1
- datacenter: 2
- distribution: 99
- password: 'superSecureRootPassword'
- ssh_pub_key: 'ssh-rsa qwerty'
- swap: 768
- wait: yes
- wait_timeout: 600
- state: present
- delegate_to: localhost
- register: linode_creation
-
-- name: Delete a server
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: absent
- delegate_to: localhost
-
-- name: Stop a server
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: stopped
- delegate_to: localhost
-
-- name: Reboot a server
- community.general.linode:
- api_key: 'longStringFromLinodeApi'
- name: linode-test1
- linode_id: "{{ linode_creation.instance.id }}"
- state: restarted
- delegate_to: localhost
-'''
-
-import time
-import traceback
-
-LINODE_IMP_ERR = None
-try:
- from linode import api as linode_api
- HAS_LINODE = True
-except ImportError:
- LINODE_IMP_ERR = traceback.format_exc()
- HAS_LINODE = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-
-
-def randompass():
- '''
- Generate a long random password that complies with Linode requirements
- '''
- # Linode API currently requires the following:
- # It must contain at least two of these four character classes:
- # lower case letters - upper case letters - numbers - punctuation
- # we play it safe :)
- import random
- import string
- # as of python 2.4, this reseeds the PRNG from urandom
- random.seed()
- lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
- upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
- number = ''.join(random.choice(string.digits) for x in range(6))
- punct = ''.join(random.choice(string.punctuation) for x in range(6))
- p = lower + upper + number + punct
- return ''.join(random.sample(p, len(p)))
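-
-
-# A minimal alternative sketch, not used anywhere in this module: it assumes
-# Python 3.6+ (the module itself still targets linode-python on Python 2)
-# and draws from the OS CSPRNG via the secrets module instead of reseeding
-# the process-wide PRNG, while keeping the same two-of-four character-class
-# guarantee. The function name is hypothetical.
-def randompass_secrets_sketch():
-    import secrets
-    import string
-    pools = (string.ascii_lowercase, string.ascii_uppercase,
-             string.digits, string.punctuation)
-    # six characters from each class, then a cryptographically secure shuffle
-    chars = [secrets.choice(pool) for pool in pools for _ in range(6)]
-    secrets.SystemRandom().shuffle(chars)
-    return ''.join(chars)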
-
-
-def getInstanceDetails(api, server):
- '''
- Return the details of an instance, populating IPs, etc.
- '''
- instance = {'id': server['LINODEID'],
- 'name': server['LABEL'],
- 'public': [],
- 'private': []}
-
- # Populate with ips
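- # The first public address found also becomes the instance's top-level
- # ipv4/fqdn, which is what most callers key on.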
- for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
- if ip['ISPUBLIC'] and 'ipv4' not in instance:
- instance['ipv4'] = ip['IPADDRESS']
- instance['fqdn'] = ip['RDNS_NAME']
- if ip['ISPUBLIC']:
- instance['public'].append({'ipv4': ip['IPADDRESS'],
- 'fqdn': ip['RDNS_NAME'],
- 'ip_id': ip['IPADDRESSID']})
- else:
- instance['private'].append({'ipv4': ip['IPADDRESS'],
- 'fqdn': ip['RDNS_NAME'],
- 'ip_id': ip['IPADDRESSID']})
- return instance
-
-
-def linodeServers(module, api, state, name,
- displaygroup, plan, additional_disks, distribution,
- datacenter, kernel_id, linode_id, payment_term, password,
- private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
- instances = []
- changed = False
- new_server = False
- servers = []
- disks = []
- configs = []
- jobs = []
-
- # See if we can match an existing server's details with the provided linode_id
- if linode_id:
- # For the moment we only consider linode_id as criteria for match
- # Later we can use more (size, name, etc.) and update existing
- servers = api.linode_list(LinodeId=linode_id)
- # Attempt to fetch details about disks and configs only if servers are
- # found with linode_id
- if servers:
- disks = api.linode_disk_list(LinodeId=linode_id)
- configs = api.linode_config_list(LinodeId=linode_id)
-
- # Act on the state
- if state in ('active', 'present', 'started'):
- # TODO: validate all the plan / distribution / datacenter are valid
-
- # Multi step process/validation:
- # - need linode_id (entity)
- # - need disk_id for linode_id - create disk from distrib
- # - need config_id for linode_id - create config (need kernel)
-
- # Any create step triggers a job that needs to be waited for.
- if not servers:
- for arg_name, arg in (('name', name), ('plan', plan),
- ('distribution', distribution), ('datacenter', datacenter)):
- if not arg:
- module.fail_json(msg='%s is required for %s state' % (arg_name, state))
- # Create linode entity
- new_server = True
-
- # Get size of all individually listed disks to subtract from Distribution disk
- used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
-
- try:
- res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
- PaymentTerm=payment_term)
- linode_id = res['LinodeID']
- # Update linode Label to match name
- api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
- # Update Linode with Ansible configuration options
- api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
- # Save server
- servers = api.linode_list(LinodeId=linode_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
-
- # Add private IP to Linode
- if private_ip:
- try:
- res = api.linode_ip_addprivate(LinodeID=linode_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
-
- if not disks:
-            for arg_name, arg in (('name', name), ('linode_id', linode_id),
-                                  ('distribution', distribution)):
-                if not arg:
-                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
- # Create disks (1 from distrib, 1 for SWAP)
- new_server = True
- try:
- if not password:
-                    # Password is required on creation; if not provided, generate one
- password = randompass()
- if not swap:
- swap = 512
- # Create data disk
- size = servers[0]['TOTALHD'] - used_disk_space - swap
-
- if ssh_pub_key:
- res = api.linode_disk_createfromdistribution(
- LinodeId=linode_id, DistributionID=distribution,
- rootPass=password, rootSSHKey=ssh_pub_key,
- Label='%s data disk (lid: %s)' % (name, linode_id),
- Size=size)
- else:
- res = api.linode_disk_createfromdistribution(
- LinodeId=linode_id, DistributionID=distribution,
- rootPass=password,
- Label='%s data disk (lid: %s)' % (name, linode_id),
- Size=size)
- jobs.append(res['JobID'])
- # Create SWAP disk
- res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
- Label='%s swap disk (lid: %s)' % (name, linode_id),
- Size=swap)
- # Create individually listed disks at specified size
- if additional_disks:
- for disk in additional_disks:
- # If a disk Type is not passed in, default to ext4
- if disk.get('Type') is None:
- disk['Type'] = 'ext4'
- res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
-
- jobs.append(res['JobID'])
- except Exception as e:
- # TODO: destroy linode ?
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
-
- if not configs:
-            for arg_name, arg in (('name', name), ('linode_id', linode_id),
-                                  ('distribution', distribution)):
-                if not arg:
-                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
-
- # Check architecture
- for distrib in api.avail_distributions():
- if distrib['DISTRIBUTIONID'] != distribution:
- continue
- arch = '32'
- if distrib['IS64BIT']:
- arch = '64'
- break
-
- # Get latest kernel matching arch if kernel_id is not specified
- if not kernel_id:
- for kernel in api.avail_kernels():
- if not kernel['LABEL'].startswith('Latest %s' % arch):
- continue
- kernel_id = kernel['KERNELID']
- break
-
- # Get disk list
- disks_id = []
- for disk in api.linode_disk_list(LinodeId=linode_id):
- if disk['TYPE'] == 'ext3':
- disks_id.insert(0, str(disk['DISKID']))
- continue
- disks_id.append(str(disk['DISKID']))
-            # Pad to nine entries: the DiskList parameter expects nine slots
- while len(disks_id) < 9:
- disks_id.append('')
- disks_list = ','.join(disks_id)
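-            # Illustration with hypothetical disk IDs: two disks would yield
-            # '1234,5678,,,,,,,' - nine comma-separated slots in total.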
-
- # Create config
- new_server = True
- try:
- api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
- Disklist=disks_list, Label='%s config' % name)
- configs = api.linode_config_list(LinodeId=linode_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
-
- # Start / Ensure servers are running
- for server in servers:
- # Refresh server state
- server = api.linode_list(LinodeId=server['LINODEID'])[0]
- # Ensure existing servers are up and running, boot if necessary
- if server['STATUS'] != 1:
- res = api.linode_boot(LinodeId=linode_id)
- jobs.append(res['JobID'])
- changed = True
-
- # wait here until the instances are up
- wait_timeout = time.time() + wait_timeout
- while wait and wait_timeout > time.time():
- # refresh the server details
- server = api.linode_list(LinodeId=server['LINODEID'])[0]
- # status:
- # -2: Boot failed
- # 1: Running
- if server['STATUS'] in (-2, 1):
- break
- time.sleep(5)
- if wait and wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
- # Get a fresh copy of the server details
- server = api.linode_list(LinodeId=server['LINODEID'])[0]
- if server['STATUS'] == -2:
- module.fail_json(msg='%s (lid: %s) failed to boot' %
- (server['LABEL'], server['LINODEID']))
- # From now on we know the task is a success
- # Build instance report
- instance = getInstanceDetails(api, server)
- # depending on wait flag select the status
- if wait:
- instance['status'] = 'Running'
- else:
- instance['status'] = 'Starting'
-
- # Return the root password if this is a new box and no SSH key
- # has been provided
- if new_server and not ssh_pub_key:
- instance['password'] = password
- instances.append(instance)
-
- elif state in ('stopped',):
- if not servers:
- module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
-
- for server in servers:
- instance = getInstanceDetails(api, server)
- if server['STATUS'] != 2:
- try:
- res = api.linode_shutdown(LinodeId=linode_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
- instance['status'] = 'Stopping'
- changed = True
- else:
- instance['status'] = 'Stopped'
- instances.append(instance)
-
- elif state in ('restarted',):
- if not servers:
- module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
-
- for server in servers:
- instance = getInstanceDetails(api, server)
- try:
- res = api.linode_reboot(LinodeId=server['LINODEID'])
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
- instance['status'] = 'Restarting'
- changed = True
- instances.append(instance)
-
- elif state in ('absent', 'deleted'):
- for server in servers:
- instance = getInstanceDetails(api, server)
- try:
- api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
- instance['status'] = 'Deleting'
- changed = True
- instances.append(instance)
-
- # Ease parsing if only 1 instance
- if len(instances) == 1:
- module.exit_json(changed=changed, instance=instances[0])
-
- module.exit_json(changed=changed, instances=instances)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(type='str', default='present',
- choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
- api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])),
- name=dict(type='str', required=True),
- alert_bwin_enabled=dict(type='bool'),
- alert_bwin_threshold=dict(type='int'),
- alert_bwout_enabled=dict(type='bool'),
- alert_bwout_threshold=dict(type='int'),
- alert_bwquota_enabled=dict(type='bool'),
- alert_bwquota_threshold=dict(type='int'),
- alert_cpu_enabled=dict(type='bool'),
- alert_cpu_threshold=dict(type='int'),
- alert_diskio_enabled=dict(type='bool'),
- alert_diskio_threshold=dict(type='int'),
- backupsenabled=dict(type='int', removed_in_version='5.0.0', removed_from_collection='community.general'),
- backupweeklyday=dict(type='int'),
- backupwindow=dict(type='int'),
- displaygroup=dict(type='str', default=''),
- plan=dict(type='int'),
- additional_disks=dict(type='list', elements='dict'),
- distribution=dict(type='int'),
- datacenter=dict(type='int'),
- kernel_id=dict(type='int'),
- linode_id=dict(type='int', aliases=['lid']),
- payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
- password=dict(type='str', no_log=True),
- private_ip=dict(type='bool'),
- ssh_pub_key=dict(type='str'),
- swap=dict(type='int', default=512),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=300),
- watchdog=dict(type='bool', default=True),
- ),
- required_if=[
- ('state', 'restarted', ['linode_id']),
- ('state', 'stopped', ['linode_id']),
- ]
- )
-
- if not HAS_LINODE:
- module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
-
- state = module.params.get('state')
- api_key = module.params.get('api_key')
- name = module.params.get('name')
- alert_bwin_enabled = module.params.get('alert_bwin_enabled')
- alert_bwin_threshold = module.params.get('alert_bwin_threshold')
- alert_bwout_enabled = module.params.get('alert_bwout_enabled')
- alert_bwout_threshold = module.params.get('alert_bwout_threshold')
- alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
- alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
- alert_cpu_enabled = module.params.get('alert_cpu_enabled')
- alert_cpu_threshold = module.params.get('alert_cpu_threshold')
- alert_diskio_enabled = module.params.get('alert_diskio_enabled')
- alert_diskio_threshold = module.params.get('alert_diskio_threshold')
- backupweeklyday = module.params.get('backupweeklyday')
- backupwindow = module.params.get('backupwindow')
- displaygroup = module.params.get('displaygroup')
- plan = module.params.get('plan')
- additional_disks = module.params.get('additional_disks')
- distribution = module.params.get('distribution')
- datacenter = module.params.get('datacenter')
- kernel_id = module.params.get('kernel_id')
- linode_id = module.params.get('linode_id')
- payment_term = module.params.get('payment_term')
- password = module.params.get('password')
- private_ip = module.params.get('private_ip')
- ssh_pub_key = module.params.get('ssh_pub_key')
- swap = module.params.get('swap')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- watchdog = int(module.params.get('watchdog'))
-
- check_items = dict(
- alert_bwin_enabled=alert_bwin_enabled,
- alert_bwin_threshold=alert_bwin_threshold,
- alert_bwout_enabled=alert_bwout_enabled,
- alert_bwout_threshold=alert_bwout_threshold,
- alert_bwquota_enabled=alert_bwquota_enabled,
- alert_bwquota_threshold=alert_bwquota_threshold,
- alert_cpu_enabled=alert_cpu_enabled,
- alert_cpu_threshold=alert_cpu_threshold,
- alert_diskio_enabled=alert_diskio_enabled,
- alert_diskio_threshold=alert_diskio_threshold,
- backupweeklyday=backupweeklyday,
- backupwindow=backupwindow,
- )
-
- kwargs = dict((k, v) for k, v in check_items.items() if v is not None)
-
- # setup the auth
- try:
- api = linode_api.Api(api_key)
- api.test_echo()
- except Exception as e:
- module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
-
- linodeServers(module, api, state, name,
- displaygroup, plan,
- additional_disks, distribution, datacenter, kernel_id, linode_id,
- payment_term, password, private_ip, ssh_pub_key, swap, wait,
- wait_timeout, watchdog, **kwargs)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py b/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
deleted file mode 100644
index fcf3725b..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/linode/linode_v4.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+
-# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: linode_v4
-short_description: Manage instances on the Linode cloud.
-description: Manage instances on the Linode cloud.
-requirements:
- - python >= 2.7
- - linode_api4 >= 2.0.0
-author:
- - Luke Murphy (@decentral1se)
-notes:
- - No Linode resizing is currently implemented. This module will, in time,
- replace the current Linode module which uses deprecated API bindings on the
- Linode side.
-options:
- region:
- description:
- - The region of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/regions/).
- type: str
- image:
- description:
- - The image of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/images/).
- type: str
- type:
- description:
- - The type of the instance. This is a required parameter only when
- creating Linode instances. See
- U(https://www.linode.com/docs/api/linode-types/).
- type: str
- label:
- description:
- - The instance label. This label is used as the main determiner for
- idempotence for the module and is therefore mandatory.
- type: str
- required: true
- group:
- description:
-      - The group that the instance should be marked under. Please note that
- group labelling is deprecated but still supported. The encouraged
- method for marking instances is to use tags.
- type: str
- private_ip:
- description:
- - If C(true), the created Linode will have private networking enabled and
- assigned a private IPv4 address.
- type: bool
- default: false
- version_added: 3.0.0
- tags:
- description:
- - The tags that the instance should be marked under. See
- U(https://www.linode.com/docs/api/tags/).
- type: list
- elements: str
- root_pass:
- description:
- - The password for the root user. If not specified, one will be
- generated. This generated password will be available in the task
- success JSON.
- type: str
- authorized_keys:
- description:
- - A list of SSH public key parts to deploy for the root user.
- type: list
- elements: str
- state:
- description:
- - The desired instance state.
- type: str
- choices:
- - present
- - absent
- required: true
- access_token:
- description:
- - The Linode API v4 access token. It may also be specified by exposing
- the C(LINODE_ACCESS_TOKEN) environment variable. See
- U(https://www.linode.com/docs/api#access-and-authentication).
- required: true
- type: str
- stackscript_id:
- description:
- - The numeric ID of the StackScript to use when creating the instance.
- See U(https://www.linode.com/docs/api/stackscripts/).
- type: int
- version_added: 1.3.0
- stackscript_data:
- description:
- - An object containing arguments to any User Defined Fields present in
- the StackScript used when creating the instance.
- Only valid when a stackscript_id is provided.
- See U(https://www.linode.com/docs/api/stackscripts/).
- type: dict
- version_added: 1.3.0
-'''
-
-EXAMPLES = """
-- name: Create a new Linode.
- community.general.linode_v4:
- label: new-linode
- type: g6-nanode-1
- region: eu-west
- image: linode/debian9
- root_pass: passw0rd
- authorized_keys:
- - "ssh-rsa ..."
- stackscript_id: 1337
- stackscript_data:
- variable: value
- state: present
-
-- name: Delete that new Linode.
- community.general.linode_v4:
- label: new-linode
- state: absent
-"""
-
-RETURN = """
-instance:
- description: The instance description in JSON serialized form.
- returned: Always.
- type: dict
- sample: {
- "root_pass": "foobar", # if auto-generated
- "alerts": {
- "cpu": 90,
- "io": 10000,
- "network_in": 10,
- "network_out": 10,
- "transfer_quota": 80
- },
- "backups": {
- "enabled": false,
- "schedule": {
- "day": null,
- "window": null
- }
- },
- "created": "2018-09-26T08:12:33",
- "group": "Foobar Group",
- "hypervisor": "kvm",
- "id": 10480444,
- "image": "linode/centos7",
- "ipv4": [
- "130.132.285.233"
- ],
- "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
- "label": "lin-foo",
- "region": "eu-west",
- "specs": {
- "disk": 25600,
- "memory": 1024,
- "transfer": 1000,
- "vcpus": 1
- },
- "status": "running",
- "tags": [],
- "type": "g6-nanode-1",
- "updated": "2018-09-26T10:10:14",
- "watchdog_enabled": true
- }
-"""
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
-
-LINODE_IMP_ERR = None
-try:
- from linode_api4 import Instance, LinodeClient
- HAS_LINODE_DEPENDENCY = True
-except ImportError:
- LINODE_IMP_ERR = traceback.format_exc()
- HAS_LINODE_DEPENDENCY = False
-
-
-def create_linode(module, client, **kwargs):
- """Creates a Linode instance and handles return format."""
- if kwargs['root_pass'] is None:
- kwargs.pop('root_pass')
-
- try:
- response = client.linode.instance_create(**kwargs)
- except Exception as exception:
- module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
-
- try:
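-        # As written here, a tuple response means the API generated the root
-        # password and returned (Instance, root_pass); a bare Instance is
-        # returned when root_pass was supplied by the caller.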
- if isinstance(response, tuple):
- instance, root_pass = response
- instance_json = instance._raw_json
- instance_json.update({'root_pass': root_pass})
- return instance_json
- else:
- return response._raw_json
- except TypeError:
- module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this'
- ' module on https://github.com/ansible-collections/community.general/issues'
- )
-
-
-def maybe_instance_from_label(module, client):
- """Try to retrieve an instance based on a label."""
- try:
- label = module.params['label']
- result = client.linode.instances(Instance.label == label)
- return result[0]
- except IndexError:
- return None
- except Exception as exception:
- module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
-
-
-def initialise_module():
- """Initialise the module parameter specification."""
- return AnsibleModule(
- argument_spec=dict(
- label=dict(type='str', required=True),
- state=dict(
- type='str',
- required=True,
- choices=['present', 'absent']
- ),
- access_token=dict(
- type='str',
- required=True,
- no_log=True,
- fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
- ),
- authorized_keys=dict(type='list', elements='str', no_log=False),
- group=dict(type='str'),
- image=dict(type='str'),
- private_ip=dict(type='bool', default=False),
- region=dict(type='str'),
- root_pass=dict(type='str', no_log=True),
- tags=dict(type='list', elements='str'),
- type=dict(type='str'),
- stackscript_id=dict(type='int'),
- stackscript_data=dict(type='dict'),
- ),
- supports_check_mode=False,
- required_one_of=(
- ['state', 'label'],
- ),
- required_together=(
- ['region', 'image', 'type'],
- )
- )
-
-
-def build_client(module):
- """Build a LinodeClient."""
- return LinodeClient(
- module.params['access_token'],
- user_agent=get_user_agent('linode_v4_module')
- )
-
-
-def main():
- """Module entrypoint."""
- module = initialise_module()
-
- if not HAS_LINODE_DEPENDENCY:
- module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
-
- client = build_client(module)
- instance = maybe_instance_from_label(module, client)
-
- if module.params['state'] == 'present' and instance is not None:
- module.exit_json(changed=False, instance=instance._raw_json)
-
- elif module.params['state'] == 'present' and instance is None:
- instance_json = create_linode(
- module, client,
- authorized_keys=module.params['authorized_keys'],
- group=module.params['group'],
- image=module.params['image'],
- label=module.params['label'],
- private_ip=module.params['private_ip'],
- region=module.params['region'],
- root_pass=module.params['root_pass'],
- tags=module.params['tags'],
- ltype=module.params['type'],
- stackscript=module.params['stackscript_id'],
- stackscript_data=module.params['stackscript_data'],
- )
- module.exit_json(changed=True, instance=instance_json)
-
- elif module.params['state'] == 'absent' and instance is not None:
- instance.delete()
- module.exit_json(changed=True, instance=instance._raw_json)
-
- elif module.params['state'] == 'absent' and instance is None:
- module.exit_json(changed=False, instance={})
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py b/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
deleted file mode 100644
index c8c577ab..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/lxc/lxc_container.py
+++ /dev/null
@@ -1,1743 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Kevin Carter
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: lxc_container
-short_description: Manage LXC Containers
-description:
- - Management of LXC containers.
-author: "Kevin Carter (@cloudnull)"
-options:
- name:
- description:
- - Name of a container.
- type: str
- required: true
- backing_store:
- choices:
- - dir
- - lvm
- - loop
- - btrfs
- - overlayfs
- - zfs
- description:
- - Backend storage type for the container.
- type: str
- default: dir
- template:
- description:
- - Name of the template to use within an LXC create.
- type: str
- default: ubuntu
- template_options:
- description:
- - Template options when building the container.
- type: str
- config:
- description:
- - Path to the LXC configuration file.
- type: path
- lv_name:
- description:
-      - Name of the logical volume.
-      - If not specified, it defaults to the container name.
- type: str
- vg_name:
- description:
- - If backend store is lvm, specify the name of the volume group.
- type: str
- default: lxc
- thinpool:
- description:
- - Use LVM thin pool called TP.
- type: str
- fs_type:
- description:
- - Create fstype TYPE.
- type: str
- default: ext4
- fs_size:
- description:
-      - File system size.
- type: str
- default: 5G
- directory:
- description:
- - Place rootfs directory under DIR.
- type: path
- zfs_root:
- description:
- - Create zfs under given zfsroot.
- type: str
- container_command:
- description:
- - Run a command within a container.
- type: str
- lxc_path:
- description:
- - Place container under PATH.
- type: path
- container_log:
- description:
- - Enable a container log for host actions to the container.
- type: bool
- default: 'no'
- container_log_level:
- choices:
- - Info
- - info
- - INFO
- - Error
- - error
- - ERROR
- - Debug
- - debug
- - DEBUG
- description:
- - Set the log level for a container where *container_log* was set.
- type: str
- required: false
- default: INFO
- clone_name:
- description:
- - Name of the new cloned server.
- - This is only used when state is clone.
- type: str
- clone_snapshot:
- description:
-      - Create a snapshot of the container when cloning.
- - This is not supported by all container storage backends.
- - Enabling this may fail if the backing store does not support snapshots.
- type: bool
- default: 'no'
- archive:
- description:
- - Create an archive of a container.
- - This will create a tarball of the running container.
- type: bool
- default: 'no'
- archive_path:
- description:
-      - Path to save the archived container.
- - If the path does not exist the archive method will attempt to create it.
- type: path
- archive_compression:
- choices:
- - gzip
- - bzip2
- - none
- description:
- - Type of compression to use when creating an archive of a running
- container.
- type: str
- default: gzip
- state:
- choices:
- - started
- - stopped
- - restarted
- - absent
- - frozen
- - clone
- description:
- - Define the state of a container.
-      - If you clone a container using I(clone_name) the newly cloned
-        container is created in a stopped state.
- - The running container will be stopped while the clone operation is
- happening and upon completion of the clone the original container
- state will be restored.
- type: str
- default: started
- container_config:
- description:
- - A list of C(key=value) options to use when configuring a container.
- type: list
- elements: str
-requirements:
- - 'lxc >= 1.0 # OS package'
- - 'python >= 2.6 # OS Package'
- - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
-notes:
- - Containers must have a unique name. If you attempt to create a container
-    with a name that already exists in the user's namespace the module will
- simply return as "unchanged".
- - The "container_command" can be used with any state except "absent". If
- used with state "stopped" the container will be "started", the command
- executed, and then the container "stopped" again. Likewise if the state
- is "stopped" and the container does not exist it will be first created,
- "started", the command executed, and then "stopped". If you use a "|"
-    in the variable you can use common script formatting within the variable
-    itself. The "container_command" option will always execute as BASH.
- When using "container_command" a log file is created in the /tmp/ directory
- which contains both stdout and stderr of any command executed.
- - If "archive" is **true** the system will attempt to create a compressed
- tarball of the running container. The "archive" option supports LVM backed
- containers and will create a snapshot of the running container when
- creating the archive.
- - If your distro does not have a package for "python2-lxc", which is a
- requirement for this module, it can be installed from source at
- "https://github.com/lxc/python2-lxc" or installed via pip using the package
- name lxc-python2.
-'''
-
-EXAMPLES = r"""
-- name: Create a started container
- community.general.lxc_container:
- name: test-container-started
- container_log: true
- template: ubuntu
- state: started
- template_options: --release trusty
-
-- name: Create a stopped container
- community.general.lxc_container:
- name: test-container-stopped
- container_log: true
- template: ubuntu
- state: stopped
- template_options: --release trusty
-
-- name: Create a frozen container
- community.general.lxc_container:
- name: test-container-frozen
- container_log: true
- template: ubuntu
- state: frozen
- template_options: --release trusty
- container_command: |
- echo 'hello world.' | tee /opt/started-frozen
-
-# Create a filesystem container, configure it, archive it, and start it.
-- name: Create filesystem container
- community.general.lxc_container:
- name: test-container-config
- backing_store: dir
- container_log: true
- template: ubuntu
- state: started
- archive: true
- archive_compression: none
- container_config:
- - "lxc.aa_profile=unconfined"
- - "lxc.cgroup.devices.allow=a *:* rmw"
- template_options: --release trusty
-
-# Create an lvm container, run a complex command in it, add additional
-# configuration to it, create an archive of it, and finally leave the container
-# in a frozen state. The container archive will be compressed using bzip2
-- name: Create a frozen lvm container
- community.general.lxc_container:
- name: test-container-lvm
- container_log: true
- template: ubuntu
- state: frozen
- backing_store: lvm
- template_options: --release trusty
- container_command: |
- apt-get update
- apt-get install -y vim lxc-dev
- echo 'hello world.' | tee /opt/started
- if [[ -f "/opt/started" ]]; then
- echo 'hello world.' | tee /opt/found-started
- fi
- container_config:
- - "lxc.aa_profile=unconfined"
- - "lxc.cgroup.devices.allow=a *:* rmw"
- archive: true
- archive_compression: bzip2
- register: lvm_container_info
-
-- name: Debug info on container "test-container-lvm"
- ansible.builtin.debug:
- var: lvm_container_info
-
-- name: Run a command in a container and ensure it is in a "stopped" state.
- community.general.lxc_container:
- name: test-container-started
- state: stopped
- container_command: |
- echo 'hello world.' | tee /opt/stopped
-
-- name: Run a command in a container and ensure it is in a "frozen" state.
- community.general.lxc_container:
- name: test-container-stopped
- state: frozen
- container_command: |
- echo 'hello world.' | tee /opt/frozen
-
-- name: Start a container
- community.general.lxc_container:
- name: test-container-stopped
- state: started
-
-- name: Run a command in a container and then restart it
- community.general.lxc_container:
- name: test-container-started
- state: restarted
- container_command: |
- echo 'hello world.' | tee /opt/restarted
-
-- name: Run a complex command within a "running" container
- community.general.lxc_container:
- name: test-container-started
- container_command: |
- apt-get update
- apt-get install -y curl wget vim apache2
- echo 'hello world.' | tee /opt/started
- if [[ -f "/opt/started" ]]; then
- echo 'hello world.' | tee /opt/found-started
- fi
-
-# Create an archive of an existing container, save the archive to a defined
-# path and then destroy it.
-- name: Archive container
- community.general.lxc_container:
- name: test-container-started
- state: absent
- archive: true
- archive_path: /opt/archives
-
-# Create a container using overlayfs, create an archive of it, create a
-# snapshot clone of the container, and finally leave the container
-# in a frozen state. The container archive will be compressed using gzip.
-- name: Create an overlayfs container archive and clone it
- community.general.lxc_container:
- name: test-container-overlayfs
- container_log: true
- template: ubuntu
- state: started
- backing_store: overlayfs
- template_options: --release trusty
- clone_snapshot: true
- clone_name: test-container-overlayfs-clone-snapshot
- archive: true
- archive_compression: gzip
- register: clone_container_info
-
-- name: Debug info on container "test-container"
- ansible.builtin.debug:
- var: clone_container_info
-
-- name: Clone a container using snapshot
- community.general.lxc_container:
- name: test-container-overlayfs-clone-snapshot
- backing_store: overlayfs
- clone_name: test-container-overlayfs-clone-snapshot2
- clone_snapshot: true
-
-- name: Create a new container and clone it
- community.general.lxc_container:
- name: test-container-new-archive
- backing_store: dir
- clone_name: test-container-new-archive-clone
-
-- name: Archive and clone a container then destroy it
- community.general.lxc_container:
- name: test-container-new-archive
- state: absent
- clone_name: test-container-new-archive-destroyed-clone
- archive: true
- archive_compression: gzip
-
-- name: Start a cloned container.
- community.general.lxc_container:
- name: test-container-new-archive-destroyed-clone
- state: started
-
-- name: Destroy a container
- community.general.lxc_container:
- name: '{{ item }}'
- state: absent
- with_items:
- - test-container-stopped
- - test-container-started
- - test-container-frozen
- - test-container-lvm
- - test-container-config
- - test-container-overlayfs
- - test-container-overlayfs-clone
- - test-container-overlayfs-clone-snapshot
- - test-container-overlayfs-clone-snapshot2
- - test-container-new-archive
- - test-container-new-archive-clone
- - test-container-new-archive-destroyed-clone
-"""
-
-RETURN = r"""
-lxc_container:
- description: container information
- returned: success
- type: complex
- contains:
- name:
- description: name of the lxc container
- returned: success
- type: str
- sample: test_host
- init_pid:
- description: pid of the lxc init process
- returned: success
- type: int
- sample: 19786
- interfaces:
- description: list of the container's network interfaces
- returned: success
- type: list
- sample: [ "eth0", "lo" ]
- ips:
- description: list of ips
- returned: success
- type: list
- sample: [ "10.0.3.3" ]
- state:
- description: resulting state of the container
- returned: success
- type: str
- sample: "running"
- archive:
-      description: path to the created container archive
- returned: success, when archive is true
- type: str
- sample: "/tmp/test-container-config.tar"
- clone:
- description: if the container was cloned
- returned: success, when clone_name is specified
- type: bool
- sample: True
-"""
-
-import os
-import os.path
-import re
-import shutil
-import subprocess
-import tempfile
-import time
-import shlex
-
-try:
- import lxc
-except ImportError:
- HAS_LXC = False
-else:
- HAS_LXC = True
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_text, to_bytes
-
-
-# LXC_COMPRESSION_MAP is a map of available compression types when creating
-# an archive of a container.
-LXC_COMPRESSION_MAP = {
- 'gzip': {
- 'extension': 'tar.tgz',
- 'argument': '-czf'
- },
- 'bzip2': {
- 'extension': 'tar.bz2',
- 'argument': '-cjf'
- },
- 'none': {
- 'extension': 'tar',
- 'argument': '-cf'
- }
-}
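-# Illustration (hypothetical values): archiving container 'web01' with
-# archive_path '/opt/archives' and archive_compression 'bzip2' produces
-# '/opt/archives/web01.tar.bz2'; _create_tar() below joins the archive path,
-# the container name and the extension from this map.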
-
-
-# LXC_COMMAND_MAP is a map of variables that are available to a method based
-# on the state the container is in.
-LXC_COMMAND_MAP = {
- 'create': {
- 'variables': {
- 'config': '--config',
- 'template': '--template',
- 'backing_store': '--bdev',
- 'lxc_path': '--lxcpath',
- 'lv_name': '--lvname',
- 'vg_name': '--vgname',
- 'thinpool': '--thinpool',
- 'fs_type': '--fstype',
- 'fs_size': '--fssize',
- 'directory': '--dir',
- 'zfs_root': '--zfsroot'
- }
- },
- 'clone': {
- 'variables-lxc-copy': {
- 'backing_store': '--backingstorage',
- 'lxc_path': '--lxcpath',
- 'fs_size': '--fssize',
- 'name': '--name',
- 'clone_name': '--newname'
- },
- # lxc-clone is deprecated in favor of lxc-copy
- 'variables-lxc-clone': {
- 'backing_store': '--backingstore',
- 'lxc_path': '--lxcpath',
- 'fs_size': '--fssize',
- 'name': '--orig',
- 'clone_name': '--new'
- }
- }
-}
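-# A create command assembled from the map above might look like this
-# (container name and option values are hypothetical):
-#   lxc-create --name web01 --quiet --template ubuntu --bdev dir -- --release trusty
-# See _create() below, which appends these flags via _add_variables().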
-
-
-# LXC_BACKING_STORE is a map of available storage backends and options that
-# are incompatible with the given storage backend.
-LXC_BACKING_STORE = {
- 'dir': [
- 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
- ],
- 'lvm': [
- 'zfs_root'
- ],
- 'btrfs': [
- 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
- ],
- 'loop': [
- 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
- ],
- 'overlayfs': [
- 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
- ],
- 'zfs': [
- 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
- ]
-}
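-# _get_vars() below uses this map to drop options that do not apply to the
-# selected backend; with backing_store=dir, for example, any lv_name, vg_name,
-# fs_type, fs_size and thinpool values are silently discarded.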
-
-
-# LXC_LOGGING_LEVELS is a map of available log levels
-LXC_LOGGING_LEVELS = {
- 'INFO': ['info', 'INFO', 'Info'],
- 'ERROR': ['error', 'ERROR', 'Error'],
- 'DEBUG': ['debug', 'DEBUG', 'Debug']
-}
-
-
-# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
-# when a particular state is evoked.
-LXC_ANSIBLE_STATES = {
- 'started': '_started',
- 'stopped': '_stopped',
- 'restarted': '_restarted',
- 'absent': '_destroyed',
- 'frozen': '_frozen',
- 'clone': '_clone'
-}
-
-
-# This is used to attach to a running container and execute commands from
-# within the container on the host. This will provide local access to a
-# container without using SSH. The template will attempt to work within the
-# home directory of the user that was attached to the container and source
-# that users environment variables by default.
-ATTACH_TEMPLATE = """#!/usr/bin/env bash
-pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
- if [[ -f ".bashrc" ]];then
- source .bashrc
- unset HOSTNAME
- fi
-popd
-
-# User defined command
-%(container_command)s
-"""
-
-
-def create_script(command):
- """Write out a script onto a target.
-
- This method should be backward compatible with Python 2.4+ when executing
- from within the container.
-
- :param command: command to run, this can be a script and can use spacing
- with newlines as separation.
- :type command: ``str``
- """
-
- (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
- f = os.fdopen(fd, 'wb')
- try:
- f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
- f.flush()
- finally:
- f.close()
-
- # Ensure the script is executable.
- os.chmod(script_file, int('0700', 8))
-
- # Output log file.
- stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
-
- # Error log file.
- stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
-
- # Execute the script command.
- try:
- subprocess.Popen(
- [script_file],
- stdout=stdout_file,
- stderr=stderr_file
- ).communicate()
- finally:
- # Close the log files.
- stderr_file.close()
- stdout_file.close()
-
- # Remove the script file upon completion of execution.
- os.remove(script_file)
-
-
-class LxcContainerManagement(object):
- def __init__(self, module):
- """Management of LXC containers via Ansible.
-
- :param module: Processed Ansible Module.
- :type module: ``object``
- """
- self.module = module
- self.state = self.module.params.get('state', None)
- self.state_change = False
- self.lxc_vg = None
- self.lxc_path = self.module.params.get('lxc_path', None)
- self.container_name = self.module.params['name']
- self.container = self.get_container_bind()
- self.archive_info = None
- self.clone_info = None
-
- def get_container_bind(self):
- return lxc.Container(name=self.container_name)
-
- @staticmethod
- def _roundup(num):
-        """Round a floating point number up to the next integer.
-
- :param num: Number to round up.
- :type: ``float``
- :returns: Rounded up number.
- :rtype: ``int``
- """
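-        # e.g. _roundup(5.0) -> 5 and _roundup(5.2) -> 6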
- num, part = str(num).split('.')
- num = int(num)
- if int(part) != 0:
- num += 1
- return num
-
- @staticmethod
- def _container_exists(container_name, lxc_path=None):
- """Check if a container exists.
-
- :param container_name: Name of the container.
- :type: ``str``
- :returns: True or False if the container is found.
-        :rtype: ``bool``
- """
- if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
- return True
- else:
- return False
-
- @staticmethod
- def _add_variables(variables_dict, build_command):
- """Return a command list with all found options.
-
-        :param variables_dict: Mapping of command-line flags to the values
-            they should be given on the built command.
- :type variables_dict: ``dict``
- :param build_command: Command to run.
- :type build_command: ``list``
- :returns: list of command options.
- :rtype: ``list``
- """
-
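-        # e.g. {'--template': 'ubuntu'} extends build_command with
-        # ['--template', 'ubuntu'].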
- for key, value in variables_dict.items():
- build_command.append(str(key))
- build_command.append(str(value))
- return build_command
-
- def _get_vars(self, variables):
- """Return a dict of all variables as found within the module.
-
- :param variables: Hash of all variables to find.
- :type variables: ``dict``
- """
-
- # Remove incompatible storage backend options.
- variables = variables.copy()
- for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
- variables.pop(v, None)
-
- return_dict = dict()
- false_values = BOOLEANS_FALSE.union([None, ''])
- for k, v in variables.items():
- _var = self.module.params.get(k)
- if _var not in false_values:
- return_dict[v] = _var
- return return_dict
-
- def _config(self):
- """Configure an LXC container.
-
- Write new configuration values to the lxc config file. This will
-        stop the container if it is running, write the new options, and then
-        restart the container upon completion.
- """
-
- _container_config = self.module.params.get('container_config')
- if not _container_config:
- return False
-
- container_config_file = self.container.config_file_name
- with open(container_config_file, 'rb') as f:
- container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
-
- parsed_options = [i.split('=', 1) for i in _container_config]
- config_change = False
- for key, value in parsed_options:
- key = key.strip()
- value = value.strip()
- new_entry = '%s = %s\n' % (key, value)
- keyre = re.compile(r'%s(\s+)?=' % key)
- for option_line in container_config:
- # Look for key in config
- if keyre.match(option_line):
- dummy, _value = option_line.split('=', 1)
- config_value = ' '.join(_value.split())
- line_index = container_config.index(option_line)
- # If the sanitized values don't match replace them
- if value != config_value:
- line_index += 1
- if new_entry not in container_config:
- config_change = True
- container_config.insert(line_index, new_entry)
- # Break the flow as values are written or not at this point
- break
- else:
- config_change = True
- container_config.append(new_entry)
-
- # If the config changed restart the container.
- if config_change:
- container_state = self._get_state()
- if container_state != 'stopped':
- self.container.stop()
-
- with open(container_config_file, 'wb') as f:
- f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
-
- self.state_change = True
- if container_state == 'running':
- self._container_startup()
- elif container_state == 'frozen':
- self._container_startup()
- self.container.freeze()
-
- def _container_create_clone(self):
- """Clone a new LXC container from an existing container.
-
- This method will clone an existing container to a new container using
- the `clone_name` variable as the new container name. The method will
- create a container if the container `name` does not exist.
-
- Note that cloning a container will ensure that the original container
- is "stopped" before the clone can be done. Because this operation can
- require a state change the method will return the original container
- to its prior state upon completion of the clone.
-
- Once the clone is complete the new container will be left in a stopped
- state.
- """
-
- # Ensure that the state of the original container is stopped
- container_state = self._get_state()
- if container_state != 'stopped':
- self.state_change = True
- self.container.stop()
-
- # lxc-clone is deprecated in favor of lxc-copy
- clone_vars = 'variables-lxc-copy'
- clone_cmd = self.module.get_bin_path('lxc-copy')
- if not clone_cmd:
- clone_vars = 'variables-lxc-clone'
- clone_cmd = self.module.get_bin_path('lxc-clone', True)
-
- build_command = [
- clone_cmd,
- ]
-
- build_command = self._add_variables(
- variables_dict=self._get_vars(
- variables=LXC_COMMAND_MAP['clone'][clone_vars]
- ),
- build_command=build_command
- )
-
- # Load logging for the instance when creating it.
- if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
- build_command.append('--snapshot')
-        # If backing_store == overlayfs, force the use of a snapshot; if
-        # overlayfs is used and snapshot is unset the clone command will
-        # fail with an unsupported type.
- elif self.module.params.get('backing_store') == 'overlayfs':
- build_command.append('--snapshot')
-
- rc, return_data, err = self.module.run_command(build_command)
- if rc != 0:
- message = "Failed executing %s." % os.path.basename(clone_cmd)
- self.failure(
- err=err, rc=rc, msg=message, command=' '.join(
- build_command
- )
- )
- else:
- self.state_change = True
- # Restore the original state of the origin container if it was
- # not in a stopped state.
- if container_state == 'running':
- self.container.start()
- elif container_state == 'frozen':
- self.container.start()
- self.container.freeze()
-
- return True
-
- def _create(self):
- """Create a new LXC container.
-
- This method will build and execute a shell command to build the
-        container. It would have been nice to simply use the lxc python library;
-        however, at the time this was written, the python library in both py2
-        and py3 didn't support some of the more advanced container create
-        processes. These missing processes mainly revolve around backing
-        LXC containers with block devices.
- """
-
- build_command = [
- self.module.get_bin_path('lxc-create', True),
- '--name', self.container_name,
- '--quiet'
- ]
-
- build_command = self._add_variables(
- variables_dict=self._get_vars(
- variables=LXC_COMMAND_MAP['create']['variables']
- ),
- build_command=build_command
- )
-
- # Load logging for the instance when creating it.
- if self.module.params.get('container_log') in BOOLEANS_TRUE:
-            # Set the logging path to /var/log/lxc if the uid is root, else
-            # set it to the home folder of the executing user.
- try:
- if os.getuid() != 0:
- log_path = os.getenv('HOME')
- else:
- if not os.path.isdir('/var/log/lxc/'):
- os.makedirs('/var/log/lxc/')
- log_path = '/var/log/lxc/'
- except OSError:
- log_path = os.getenv('HOME')
-
- build_command.extend([
- '--logfile',
- os.path.join(
- log_path, 'lxc-%s.log' % self.container_name
- ),
- '--logpriority',
- self.module.params.get(
- 'container_log_level'
- ).upper()
- ])
-
- # Add the template commands to the end of the command if there are any
- template_options = self.module.params.get('template_options', None)
- if template_options:
- build_command.append('--')
- build_command += shlex.split(template_options)
-
- rc, return_data, err = self.module.run_command(build_command)
- if rc != 0:
- message = "Failed executing lxc-create."
- self.failure(
- err=err, rc=rc, msg=message, command=' '.join(build_command)
- )
- else:
- self.state_change = True
-
- def _container_data(self):
- """Returns a dict of container information.
-
- :returns: container data
- :rtype: ``dict``
- """
-
- return {
- 'interfaces': self.container.get_interfaces(),
- 'ips': self.container.get_ips(),
- 'state': self._get_state(),
- 'init_pid': int(self.container.init_pid),
- 'name': self.container_name,
- }
-
- def _unfreeze(self):
- """Unfreeze a container.
-
-        :returns: True or False based on whether the container was unfrozen.
-        :rtype: ``bool``
- """
-
- unfreeze = self.container.unfreeze()
- if unfreeze:
- self.state_change = True
- return unfreeze
-
- def _get_state(self):
- """Return the state of a container.
-
- If the container is not found the state returned is "absent"
-
- :returns: state of a container as a lower case string.
- :rtype: ``str``
- """
-
- if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- return str(self.container.state).lower()
- return str('absent')
-
- def _execute_command(self):
- """Execute a shell command."""
-
- container_command = self.module.params.get('container_command')
- if container_command:
- container_state = self._get_state()
- if container_state == 'frozen':
- self._unfreeze()
- elif container_state == 'stopped':
- self._container_startup()
-
- self.container.attach_wait(create_script, container_command)
- self.state_change = True
-
- def _container_startup(self, timeout=60):
- """Ensure a container is started.
-
-        :param timeout: Time before the start operation is abandoned.
- :type timeout: ``int``
- """
-
- self.container = self.get_container_bind()
- for dummy in xrange(timeout):
- if self._get_state() != 'running':
- self.container.start()
- self.state_change = True
- # post startup sleep for 1 second.
- time.sleep(1)
- else:
- return True
- self.failure(
- lxc_container=self._container_data(),
- error='Failed to start container'
- ' [ %s ]' % self.container_name,
- rc=1,
-            msg='The container [ %s ] failed to start. Check that lxc is'
- ' available and that the container is in a functional'
- ' state.' % self.container_name
- )
-
- def _check_archive(self):
- """Create a compressed archive of a container.
-
-        This stores the archive information as self.archive_info.
- """
-
- if self.module.params.get('archive') in BOOLEANS_TRUE:
- self.archive_info = {
- 'archive': self._container_create_tar()
- }
-
- def _check_clone(self):
-        """Clone the container if a clone_name is provided.
-
-        This stores the clone information as self.clone_info.
- """
-
- clone_name = self.module.params.get('clone_name')
- if clone_name:
- if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
- self.clone_info = {
- 'cloned': self._container_create_clone()
- }
- else:
- self.clone_info = {
- 'cloned': False
- }
-
- def _destroyed(self, timeout=60):
- """Ensure a container is destroyed.
-
- :param timeout: Time before the destroy operation is abandoned.
- :type timeout: ``int``
- """
-
- for dummy in xrange(timeout):
- if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- break
-
- # Check if the container needs to have an archive created.
- self._check_archive()
-
- # Check if the container is to be cloned
- self._check_clone()
-
- if self._get_state() != 'stopped':
- self.state_change = True
- self.container.stop()
-
- if self.container.destroy():
- self.state_change = True
-
- # post destroy attempt sleep for 1 second.
- time.sleep(1)
- else:
- self.failure(
- lxc_container=self._container_data(),
- error='Failed to destroy container'
- ' [ %s ]' % self.container_name,
- rc=1,
- msg='The container [ %s ] failed to be destroyed. Check'
- ' that lxc is available and that the container is in a'
- ' functional state.' % self.container_name
- )
-
- def _frozen(self, count=0):
- """Ensure a container is frozen.
-
- If the container does not exist the container will be created.
-
- :param count: number of times this command has been called by itself.
- :type count: ``int``
- """
-
- self.check_count(count=count, method='frozen')
- if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- self._execute_command()
-
- # Perform any configuration updates
- self._config()
-
- container_state = self._get_state()
- if container_state == 'frozen':
- pass
- elif container_state == 'running':
- self.container.freeze()
- self.state_change = True
- else:
- self._container_startup()
- self.container.freeze()
- self.state_change = True
-
- # Check if the container needs to have an archive created.
- self._check_archive()
-
- # Check if the container is to be cloned
- self._check_clone()
- else:
- self._create()
- count += 1
- self._frozen(count)
-
- def _restarted(self, count=0):
- """Ensure a container is restarted.
-
- If the container does not exist the container will be created.
-
- :param count: number of times this command has been called by itself.
- :type count: ``int``
- """
-
- self.check_count(count=count, method='restart')
- if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- self._execute_command()
-
- # Perform any configuration updates
- self._config()
-
- if self._get_state() != 'stopped':
- self.container.stop()
- self.state_change = True
-
- # Run container startup
- self._container_startup()
-
- # Check if the container needs to have an archive created.
- self._check_archive()
-
- # Check if the container is to be cloned
- self._check_clone()
- else:
- self._create()
- count += 1
- self._restarted(count)
-
- def _stopped(self, count=0):
- """Ensure a container is stopped.
-
- If the container does not exist the container will be created.
-
- :param count: number of times this command has been called by itself.
- :type count: ``int``
- """
-
- self.check_count(count=count, method='stop')
- if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- self._execute_command()
-
- # Perform any configuration updates
- self._config()
-
- if self._get_state() != 'stopped':
- self.container.stop()
- self.state_change = True
-
- # Check if the container needs to have an archive created.
- self._check_archive()
-
- # Check if the container is to be cloned
- self._check_clone()
- else:
- self._create()
- count += 1
- self._stopped(count)
-
- def _started(self, count=0):
- """Ensure a container is started.
-
- If the container does not exist the container will be created.
-
- :param count: number of times this command has been called by itself.
- :type count: ``int``
- """
-
- self.check_count(count=count, method='start')
- if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
- container_state = self._get_state()
- if container_state == 'running':
- pass
- elif container_state == 'frozen':
- self._unfreeze()
- elif not self._container_startup():
- self.failure(
- lxc_container=self._container_data(),
- error='Failed to start container'
- ' [ %s ]' % self.container_name,
- rc=1,
-                msg='The container [ %s ] failed to start. Check that lxc is'
- ' available and that the container is in a functional'
- ' state.' % self.container_name
- )
-
- # Return data
- self._execute_command()
-
- # Perform any configuration updates
- self._config()
-
- # Check if the container needs to have an archive created.
- self._check_archive()
-
- # Check if the container is to be cloned
- self._check_clone()
- else:
- self._create()
- count += 1
- self._started(count)
-
- def _get_lxc_vg(self):
- """Return the name of the Volume Group used in LXC."""
-
- build_command = [
- self.module.get_bin_path('lxc-config', True),
- "lxc.bdev.lvm.vg"
- ]
- rc, vg, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='Failed to read LVM VG from LXC config',
- command=' '.join(build_command)
- )
- else:
- return str(vg.strip())
-
- def _lvm_lv_list(self):
-        """Return a list of all LVs in the current VG."""
-
- vg = self._get_lxc_vg()
- build_command = [
- self.module.get_bin_path('lvs', True)
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='Failed to get list of LVs',
- command=' '.join(build_command)
- )
-
- all_lvms = [i.split() for i in stdout.splitlines()][1:]
- return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
-
- def _get_vg_free_pe(self, vg_name):
- """Return the available size of a given VG.
-
-        :param vg_name: Name of the volume group.
-        :type vg_name: ``str``
-        :returns: free size and unit of the given VG
-        :type: ``tuple``
- """
-
- build_command = [
- 'vgdisplay',
- vg_name,
- '--units',
- 'g'
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to read vg %s' % vg_name,
- command=' '.join(build_command)
- )
-
- vg_info = [i.strip() for i in stdout.splitlines()][1:]
- free_pe = [i for i in vg_info if i.startswith('Free')]
- _free_pe = free_pe[0].split()
- return float(_free_pe[-2]), _free_pe[-1]
-
- def _get_lv_size(self, lv_name):
- """Return the available size of a given LV.
-
- :param lv_name: Name of volume.
- :type lv_name: ``str``
- :returns: size and measurement of an LV
- :type: ``tuple``
- """
-
- vg = self._get_lxc_vg()
- lv = os.path.join(vg, lv_name)
- build_command = [
- 'lvdisplay',
- lv,
- '--units',
- 'g'
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to read lv %s' % lv,
- command=' '.join(build_command)
- )
-
- lv_info = [i.strip() for i in stdout.splitlines()][1:]
- _free_pe = [i for i in lv_info if i.startswith('LV Size')]
- free_pe = _free_pe[0].split()
- return self._roundup(float(free_pe[-2])), free_pe[-1]
-
- def _lvm_snapshot_create(self, source_lv, snapshot_name,
- snapshot_size_gb=5):
- """Create an LVM snapshot.
-
- :param source_lv: Name of lv to snapshot
- :type source_lv: ``str``
- :param snapshot_name: Name of lv snapshot
- :type snapshot_name: ``str``
- :param snapshot_size_gb: Size of snapshot to create
- :type snapshot_size_gb: ``int``
- """
-
- vg = self._get_lxc_vg()
-        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
-
- if free_space < float(snapshot_size_gb):
- message = (
-                'Snapshot size [ %s ] is greater than the free space [ %s ] on'
-                ' volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
- )
- self.failure(
- error='Not enough space to create snapshot',
- rc=2,
- msg=message
- )
-
- # Create LVM Snapshot
- build_command = [
- self.module.get_bin_path('lvcreate', True),
- "-n",
- snapshot_name,
- "-s",
- os.path.join(vg, source_lv),
- "-L%sg" % snapshot_size_gb
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
-                msg='Failed to create LVM snapshot %s/%s --> %s'
- % (vg, source_lv, snapshot_name)
- )
-
- def _lvm_lv_mount(self, lv_name, mount_point):
-        """Mount an LV.
-
- :param lv_name: name of the logical volume to mount
- :type lv_name: ``str``
- :param mount_point: path on the file system that is mounted.
- :type mount_point: ``str``
- """
-
- vg = self._get_lxc_vg()
-
- build_command = [
- self.module.get_bin_path('mount', True),
- "/dev/%s/%s" % (vg, lv_name),
- mount_point,
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
-                msg='failed to mount LVM LV %s/%s to %s'
- % (vg, lv_name, mount_point)
- )
-
- def _create_tar(self, source_dir):
-        """Create an archive of a given ``source_dir`` under the configured ``archive_path``.
-
- :param source_dir: Path to the directory to be archived.
- :type source_dir: ``str``
- """
-
- old_umask = os.umask(int('0077', 8))
-
- archive_path = self.module.params.get('archive_path')
- if not os.path.isdir(archive_path):
- os.makedirs(archive_path)
-
- archive_compression = self.module.params.get('archive_compression')
- compression_type = LXC_COMPRESSION_MAP[archive_compression]
-
-        # Build the archive file name from the archive path, container name and extension.
- archive_name = '%s.%s' % (
- os.path.join(
- archive_path,
- self.container_name
- ),
- compression_type['extension']
- )
-
- build_command = [
- self.module.get_bin_path('tar', True),
- '--directory=%s' % os.path.realpath(
- os.path.expanduser(source_dir)
- ),
- compression_type['argument'],
- archive_name,
- '.'
- ]
-
- rc, stdout, err = self.module.run_command(
- build_command
- )
-
- os.umask(old_umask)
-
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to create tar archive',
- command=' '.join(build_command)
- )
-
- return archive_name
-
- def _lvm_lv_remove(self, lv_name):
- """Remove an LV.
-
- :param lv_name: The name of the logical volume
- :type lv_name: ``str``
- """
-
- vg = self._get_lxc_vg()
- build_command = [
- self.module.get_bin_path('lvremove', True),
- "-f",
- "%s/%s" % (vg, lv_name),
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
- command=' '.join(build_command)
- )
-
- def _rsync_data(self, container_path, temp_dir):
- """Sync the container directory to the temp directory.
-
-        :param container_path: path to the container rootfs
- :type container_path: ``str``
- :param temp_dir: path to the temporary local working directory
- :type temp_dir: ``str``
- """
-        # This loop supports overlayfs archives; it squashes all of the
-        # layers into a single archive.
- fs_paths = container_path.split(':')
- if 'overlayfs' in fs_paths:
- fs_paths.pop(fs_paths.index('overlayfs'))
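-            # e.g. an overlayfs rootfs of 'overlayfs:/lower/dir:/upper/dir'
-            # (illustrative) leaves ['/lower/dir', '/upper/dir'] to be synced.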
-
- for fs_path in fs_paths:
- # Set the path to the container data
- fs_path = os.path.dirname(fs_path)
-
- # Run the sync command
- build_command = [
- self.module.get_bin_path('rsync', True),
- '-aHAX',
- fs_path,
- temp_dir,
- ]
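-            # Illustrative resulting command (paths assumed):
-            #   rsync -aHAX /var/lib/lxc/web01 /tmp/tmpXXXXXX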
- rc, stdout, err = self.module.run_command(
- build_command,
- )
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to perform archive',
- command=' '.join(build_command)
- )
-
- def _unmount(self, mount_point):
- """Unmount a file system.
-
- :param mount_point: path on the file system that is mounted.
- :type mount_point: ``str``
- """
-
- build_command = [
- self.module.get_bin_path('umount', True),
- mount_point,
- ]
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to unmount [ %s ]' % mount_point,
- command=' '.join(build_command)
- )
-
- def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
- """mount an lv.
-
- :param lowerdir: name/path of the lower directory
- :type lowerdir: ``str``
- :param upperdir: name/path of the upper directory
- :type upperdir: ``str``
- :param mount_point: path on the file system that is mounted.
- :type mount_point: ``str``
- """
-
- build_command = [
- self.module.get_bin_path('mount', True),
- '-t', 'overlayfs',
- '-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
- 'overlayfs',
- mount_point,
- ]
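-        # Illustrative resulting command (paths assumed):
-        #   mount -t overlayfs -o lowerdir=/lower,upperdir=/upper overlayfs /tmp/tmpXXXXXX/web01/rootfs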
- rc, stdout, err = self.module.run_command(build_command)
- if rc != 0:
- self.failure(
- err=err,
- rc=rc,
- msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
- % (lowerdir, upperdir, mount_point, build_command)
- )
-
- def _container_create_tar(self):
- """Create a tar archive from an LXC container.
-
- The process is as follows:
- * Stop or Freeze the container
- * Create temporary dir
- * Copy container and config to temporary directory
- * If LVM backed:
- * Create LVM snapshot of LV backing the container
- * Mount the snapshot to tmpdir/rootfs
- * Restore the state of the container
- * Create tar of tmpdir
- * Clean up
- """
-
- # Create a temp dir
- temp_dir = tempfile.mkdtemp()
-
- # Set the name of the working dir, temp + container_name
- work_dir = os.path.join(temp_dir, self.container_name)
-
- # LXC container rootfs
- lxc_rootfs = self.container.get_config_item('lxc.rootfs')
-
-        # Test if the container's rootfs is a block device
- block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
-
- # Test if the container is using overlayfs
- overlayfs_backed = lxc_rootfs.startswith('overlayfs')
-
- mount_point = os.path.join(work_dir, 'rootfs')
-
- # Set the snapshot name if needed
- snapshot_name = '%s_lxc_snapshot' % self.container_name
-
- container_state = self._get_state()
- try:
- # Ensure the original container is stopped or frozen
- if container_state not in ['stopped', 'frozen']:
- if container_state == 'running':
- self.container.freeze()
- else:
- self.container.stop()
-
- # Sync the container data from the container_path to work_dir
- self._rsync_data(lxc_rootfs, temp_dir)
-
- if block_backed:
- if snapshot_name not in self._lvm_lv_list():
- if not os.path.exists(mount_point):
- os.makedirs(mount_point)
-
- # Take snapshot
- size, measurement = self._get_lv_size(
- lv_name=self.container_name
- )
- self._lvm_snapshot_create(
- source_lv=self.container_name,
- snapshot_name=snapshot_name,
- snapshot_size_gb=size
- )
-
- # Mount snapshot
- self._lvm_lv_mount(
- lv_name=snapshot_name,
- mount_point=mount_point
- )
- else:
- self.failure(
- err='snapshot [ %s ] already exists' % snapshot_name,
- rc=1,
- msg='The snapshot [ %s ] already exists. Please clean'
- ' up old snapshot of containers before continuing.'
- % snapshot_name
- )
- elif overlayfs_backed:
- lowerdir, upperdir = lxc_rootfs.split(':')[1:]
- self._overlayfs_mount(
- lowerdir=lowerdir,
- upperdir=upperdir,
- mount_point=mount_point
- )
-
- # Set the state as changed and set a new fact
- self.state_change = True
- return self._create_tar(source_dir=work_dir)
- finally:
- if block_backed or overlayfs_backed:
- # unmount snapshot
- self._unmount(mount_point)
-
- if block_backed:
- # Remove snapshot
- self._lvm_lv_remove(snapshot_name)
-
- # Restore original state of container
- if container_state == 'running':
- if self._get_state() == 'frozen':
- self.container.unfreeze()
- else:
- self.container.start()
-
- # Remove tmpdir
- shutil.rmtree(temp_dir)
-
- def check_count(self, count, method):
- if count > 1:
- self.failure(
- error='Failed to %s container' % method,
- rc=1,
-                msg='The container [ %s ] failed to %s. Check that lxc is'
- ' available and that the container is in a functional'
- ' state.' % (self.container_name, method)
- )
-
- def failure(self, **kwargs):
- """Return a Failure when running an Ansible command.
-
- :param error: ``str`` Error that occurred.
- :param rc: ``int`` Return code while executing an Ansible command.
- :param msg: ``str`` Message to report.
- """
-
- self.module.fail_json(**kwargs)
-
- def run(self):
- """Run the main method."""
-
- action = getattr(self, LXC_ANSIBLE_STATES[self.state])
- action()
-
- outcome = self._container_data()
- if self.archive_info:
- outcome.update(self.archive_info)
-
- if self.clone_info:
- outcome.update(self.clone_info)
-
- self.module.exit_json(
- changed=self.state_change,
- lxc_container=outcome
- )
-
-
-def main():
- """Ansible Main module."""
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(
- type='str',
- required=True
- ),
- template=dict(
- type='str',
- default='ubuntu'
- ),
- backing_store=dict(
- type='str',
- choices=list(LXC_BACKING_STORE.keys()),
- default='dir'
- ),
- template_options=dict(
- type='str'
- ),
- config=dict(
- type='path',
- ),
- vg_name=dict(
- type='str',
- default='lxc'
- ),
- thinpool=dict(
- type='str'
- ),
- fs_type=dict(
- type='str',
- default='ext4'
- ),
- fs_size=dict(
- type='str',
- default='5G'
- ),
- directory=dict(
- type='path'
- ),
- zfs_root=dict(
- type='str'
- ),
- lv_name=dict(
- type='str'
- ),
- lxc_path=dict(
- type='path'
- ),
- state=dict(
- choices=list(LXC_ANSIBLE_STATES.keys()),
- default='started'
- ),
- container_command=dict(
- type='str'
- ),
- container_config=dict(
- type='list',
- elements='str'
- ),
- container_log=dict(
- type='bool',
- default=False
- ),
- container_log_level=dict(
- choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
- default='INFO'
- ),
- clone_name=dict(
- type='str',
- required=False
- ),
- clone_snapshot=dict(
- type='bool',
-            default=False
- ),
- archive=dict(
- type='bool',
- default=False
- ),
- archive_path=dict(
- type='path',
- ),
- archive_compression=dict(
- choices=list(LXC_COMPRESSION_MAP.keys()),
- default='gzip'
- )
- ),
- supports_check_mode=False,
- required_if=([
- ('archive', True, ['archive_path'])
- ]),
- )
-
- if not HAS_LXC:
- module.fail_json(
- msg='The `lxc` module is not importable. Check the requirements.'
- )
-
- lv_name = module.params.get('lv_name')
- if not lv_name:
- module.params['lv_name'] = module.params.get('name')
-
- lxc_manage = LxcContainerManagement(module=module)
- lxc_manage.run()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py b/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
deleted file mode 100644
index bd232668..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_container.py
+++ /dev/null
@@ -1,804 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Hiroaki Nakamura
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: lxd_container
-short_description: Manage LXD instances
-description:
- - Management of LXD containers and virtual machines.
-author: "Hiroaki Nakamura (@hnakamur)"
-options:
- name:
- description:
- - Name of an instance.
- type: str
- required: true
- architecture:
- description:
- - 'The architecture for the instance (for example C(x86_64) or C(i686)).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
- type: str
- required: false
- config:
- description:
- - 'The config for the instance (for example C({"limits.cpu": "2"})).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
- - If the instance already exists and its "config" values in metadata
- obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
- are different, this module tries to apply the configurations.
- - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
- type: dict
- required: false
- ignore_volatile_options:
- description:
- - If set to C(true), options starting with C(volatile.) are ignored. As a result,
- they are reapplied for each execution.
- - This default behavior can be changed by setting this option to C(false).
- - The current default value C(true) is deprecated since community.general 4.0.0,
- and will change to C(false) in community.general 6.0.0.
- type: bool
- required: false
- version_added: 3.7.0
- profiles:
- description:
- - Profile to be used by the instance.
- type: list
- elements: str
- devices:
- description:
- - 'The devices for the instance
- (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
- type: dict
- required: false
- ephemeral:
- description:
- - Whether or not the instance is ephemeral (for example C(true) or C(false)).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
- required: false
- type: bool
- source:
- description:
- - 'The source for the instance
- (e.g. { "type": "image",
- "mode": "pull",
- "server": "https://images.linuxcontainers.org",
- "protocol": "lxd",
- "alias": "ubuntu/xenial/amd64" }).'
- - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
- - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
- required: false
- type: dict
- state:
- choices:
- - started
- - stopped
- - restarted
- - absent
- - frozen
- description:
- - Define the state of an instance.
- required: false
- default: started
- type: str
- target:
- description:
- - For cluster deployments. Will attempt to create an instance on a target node.
- If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
-        The name should match the name of the node shown in C(lxc cluster list).
- type: str
- required: false
- version_added: 1.0.0
- timeout:
- description:
- - A timeout for changing the state of the instance.
- - This is also used as a timeout for waiting until IPv4 addresses
-        are assigned to all network interfaces in the instance after
- starting or restarting.
- required: false
- default: 30
- type: int
- type:
- description:
- - Instance type can be either C(virtual-machine) or C(container).
- required: false
- default: container
- choices:
- - container
- - virtual-machine
- type: str
- version_added: 4.1.0
- wait_for_ipv4_addresses:
- description:
- - If this is true, the C(lxd_container) waits until IPv4 addresses
-        are assigned to all network interfaces in the instance after
- starting or restarting.
- required: false
- default: false
- type: bool
- wait_for_container:
- description:
-        - If set to C(true), the module will wait until the operation reports a
-          success status when performing container operations.
- default: false
- type: bool
- version_added: 4.4.0
- force_stop:
- description:
-        - If this is true, the C(lxd_container) forcibly stops the instance
-          when stopping or restarting it.
- required: false
- default: false
- type: bool
- url:
- description:
- - The unix domain socket path or the https URL for the LXD server.
- required: false
- default: unix:/var/lib/lxd/unix.socket
- type: str
- snap_url:
- description:
- - The unix domain socket path when LXD is installed by snap package manager.
- required: false
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
- required: false
- aliases: [ key_file ]
- type: path
- client_cert:
- description:
- - The client certificate file path.
- - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
- required: false
- aliases: [ cert_file ]
- type: path
- trust_password:
- description:
- - The client trusted password.
- - 'You need to set this password on the LXD server before
- running this module using the following command:
- C(lxc config set core.trust_password ).
- See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
-        - If trust_password is set, this module sends an authentication
-          request before sending any other requests.
- required: false
- type: str
-notes:
-  - Instances can be either containers or virtual machines; both must have a unique name. If you attempt to create an instance
-    with a name that already exists in the user's namespace, the module will
-    simply return as "unchanged".
-  - There are two ways to run commands inside a container or virtual machine: using the command
-    module, or using the Ansible lxd connection plugin bundled in Ansible >=
-    2.1. The latter requires Python to be installed in the instance, which can
-    be done with the command module.
- - You can copy a file from the host to the instance
- with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) module and the `lxd` connection plugin.
- See the example below.
- - You can copy a file in the created instance to the localhost
- with `command=lxc file pull instance_name/dir/filename filename`.
- See the first example below.
-'''
-
-EXAMPLES = '''
-# An example for creating an Ubuntu container and installing Python
-- hosts: localhost
- connection: local
- tasks:
- - name: Create a started container
- community.general.lxd_container:
- name: mycontainer
- ignore_volatile_options: true
- state: started
- source:
- type: image
- mode: pull
- server: https://images.linuxcontainers.org
- protocol: lxd # if you get a 404, try setting protocol: simplestreams
- alias: ubuntu/xenial/amd64
- profiles: ["default"]
- wait_for_ipv4_addresses: true
- timeout: 600
-
- - name: Check python is installed in container
- delegate_to: mycontainer
- ansible.builtin.raw: dpkg -s python
- register: python_install_check
- failed_when: python_install_check.rc not in [0, 1]
- changed_when: false
-
- - name: Install python in container
- delegate_to: mycontainer
- ansible.builtin.raw: apt-get install -y python
- when: python_install_check.rc == 1
-
-# An example for creating an Ubuntu 14.04 container using an image fingerprint.
-# This requires changing 'server' and 'protocol' key values, replacing the
-# 'alias' key with 'fingerprint' and supplying an appropriate value that
-# matches the container image you wish to use.
-- hosts: localhost
- connection: local
- tasks:
- - name: Create a started container
- community.general.lxd_container:
- name: mycontainer
- ignore_volatile_options: true
- state: started
- source:
- type: image
- mode: pull
- # Provides current (and older) Ubuntu images with listed fingerprints
- server: https://cloud-images.ubuntu.com/releases
- # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
- protocol: simplestreams
- # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
- fingerprint: e9a8bdfab6dc
- profiles: ["default"]
- wait_for_ipv4_addresses: true
- timeout: 600
-
-# An example for deleting a container
-- hosts: localhost
- connection: local
- tasks:
- - name: Delete a container
- community.general.lxd_container:
- name: mycontainer
- state: absent
- type: container
-
-# An example for restarting a container
-- hosts: localhost
- connection: local
- tasks:
- - name: Restart a container
- community.general.lxd_container:
- name: mycontainer
- state: restarted
- type: container
-
-# An example for restarting a container using https to connect to the LXD server
-- hosts: localhost
- connection: local
- tasks:
- - name: Restart a container
- community.general.lxd_container:
- url: https://127.0.0.1:8443
- # These client_cert and client_key values are equal to the default values.
- #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
- #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
- trust_password: mypassword
- name: mycontainer
- state: restarted
-
-# Note your container must be in the inventory for the below example.
-#
-# [containers]
-# mycontainer ansible_connection=lxd
-#
-- hosts:
- - mycontainer
- tasks:
- - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
- ansible.builtin.fetch:
- src: /etc/hosts
- dest: /tmp/mycontainer-hosts
- flat: true
-
-# An example for LXD cluster deployments. This example will create two new containers on specific
-# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
-# members that the LXD cluster recognizes, not Ansible inventory names; see 'lxc cluster list'.
-# LXD API calls can be made to any LXD member; in this example, we send API requests to
-# 'node01.example.com', which matches the Ansible inventory name.
-- hosts: node01.example.com
- tasks:
- - name: Create LXD container
- community.general.lxd_container:
- name: new-container-1
- ignore_volatile_options: true
- state: started
- source:
- type: image
- mode: pull
- alias: ubuntu/xenial/amd64
- target: node01
-
- - name: Create container on another node
- community.general.lxd_container:
- name: new-container-2
- ignore_volatile_options: true
- state: started
- source:
- type: image
- mode: pull
- alias: ubuntu/xenial/amd64
- target: node02
-
-# An example for creating a virtual machine
-- hosts: localhost
- connection: local
- tasks:
- - name: Create container on another node
- community.general.lxd_container:
- name: new-vm-1
- type: virtual-machine
- state: started
- ignore_volatile_options: true
- wait_for_ipv4_addresses: true
- profiles: ["default"]
- source:
- protocol: simplestreams
- type: image
- mode: pull
- server: https://images.linuxcontainers.org
- alias: debian/11
- timeout: 600
-'''
-
-RETURN = '''
-addresses:
- description: Mapping from the network device name to a list of IPv4 addresses in the instance.
- returned: when state is started or restarted
- type: dict
- sample: {"eth0": ["10.155.92.191"]}
-old_state:
- description: The old state of the instance.
- returned: when state is started or restarted
- type: str
- sample: "stopped"
-logs:
- description: The logs of requests and responses.
- returned: when ansible-playbook is invoked with -vvvv.
- type: list
- sample: "(too long to be placed here)"
-actions:
- description: List of actions performed for the instance.
- returned: success
- type: list
- sample: '["create", "start"]'
-'''
-import datetime
-import os
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-
-# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
-# when a particular state is evoked.
-LXD_ANSIBLE_STATES = {
- 'started': '_started',
- 'stopped': '_stopped',
- 'restarted': '_restarted',
- 'absent': '_destroyed',
- 'frozen': '_frozen'
-}
-
-# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
-# lxc_container module state parameter value.
-ANSIBLE_LXD_STATES = {
- 'Running': 'started',
- 'Stopped': 'stopped',
- 'Frozen': 'frozen',
-}
-
-# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
-ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
-
-# CONFIG_PARAMS is a list of config attribute names.
-CONFIG_PARAMS = [
- 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
-]
-
-
-class LXDContainerManagement(object):
- def __init__(self, module):
- """Management of LXC containers via Ansible.
-
- :param module: Processed Ansible Module.
- :type module: ``object``
- """
- self.module = module
- self.name = self.module.params['name']
- self._build_config()
-
- self.state = self.module.params['state']
-
- self.timeout = self.module.params['timeout']
- self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
- self.force_stop = self.module.params['force_stop']
- self.addresses = None
- self.target = self.module.params['target']
- self.wait_for_container = self.module.params['wait_for_container']
-
- self.type = self.module.params['type']
-
-        # The LXD REST API provides separate endpoints for containers and virtual machines.
- self.api_endpoint = None
- if self.type == 'container':
- self.api_endpoint = '/1.0/containers'
- elif self.type == 'virtual-machine':
- self.api_endpoint = '/1.0/virtual-machines'
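-        # All subsequent requests use this endpoint, e.g.
-        # GET /1.0/containers/<name> or GET /1.0/virtual-machines/<name>.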
-
- self.key_file = self.module.params.get('client_key')
- if self.key_file is None:
- self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
- self.cert_file = self.module.params.get('client_cert')
- if self.cert_file is None:
- self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
- self.debug = self.module._verbosity >= 4
-
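-        # Prefer an explicitly supplied URL; otherwise fall back to the snap
-        # socket if it exists, and finally to the default socket.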
- try:
- if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
- self.url = self.module.params['url']
- elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
- self.url = self.module.params['snap_url']
- else:
- self.url = self.module.params['url']
- except Exception as e:
-            self.module.fail_json(msg=str(e))
-
- try:
- self.client = LXDClient(
- self.url, key_file=self.key_file, cert_file=self.cert_file,
- debug=self.debug
- )
- except LXDClientException as e:
- self.module.fail_json(msg=e.msg)
- self.trust_password = self.module.params.get('trust_password', None)
- self.actions = []
-
- def _build_config(self):
- self.config = {}
- for attr in CONFIG_PARAMS:
- param_val = self.module.params.get(attr, None)
- if param_val is not None:
- self.config[attr] = param_val
-
- def _get_instance_json(self):
- return self.client.do(
- 'GET', '{0}/{1}'.format(self.api_endpoint, self.name),
- ok_error_codes=[404]
- )
-
- def _get_instance_state_json(self):
- return self.client.do(
- 'GET', '{0}/{1}/state'.format(self.api_endpoint, self.name),
- ok_error_codes=[404]
- )
-
- @staticmethod
- def _instance_json_to_module_state(resp_json):
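-        # Maps the LXD status to the module state, e.g. 'Running' -> 'started'
-        # (per ANSIBLE_LXD_STATES).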
- if resp_json['type'] == 'error':
- return 'absent'
- return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
-
- def _change_state(self, action, force_stop=False):
- body_json = {'action': action, 'timeout': self.timeout}
- if force_stop:
- body_json['force'] = True
- return self.client.do('PUT', '{0}/{1}/state'.format(self.api_endpoint, self.name), body_json=body_json)
-
- def _create_instance(self):
- config = self.config.copy()
- config['name'] = self.name
- if self.target:
- self.client.do('POST', '{0}?{1}'.format(self.api_endpoint, urlencode(dict(target=self.target))), config, wait_for_container=self.wait_for_container)
- else:
- self.client.do('POST', self.api_endpoint, config, wait_for_container=self.wait_for_container)
- self.actions.append('create')
-
- def _start_instance(self):
- self._change_state('start')
- self.actions.append('start')
-
- def _stop_instance(self):
- self._change_state('stop', self.force_stop)
- self.actions.append('stop')
-
- def _restart_instance(self):
- self._change_state('restart', self.force_stop)
- self.actions.append('restart')
-
- def _delete_instance(self):
- self.client.do('DELETE', '{0}/{1}'.format(self.api_endpoint, self.name))
- self.actions.append('delete')
-
- def _freeze_instance(self):
- self._change_state('freeze')
- self.actions.append('freeze')
-
- def _unfreeze_instance(self):
- self._change_state('unfreeze')
-        self.actions.append('unfreeze')
-
- def _instance_ipv4_addresses(self, ignore_devices=None):
- ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
-
- resp_json = self._get_instance_state_json()
- network = resp_json['metadata']['network'] or {}
- network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
- addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
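-        # Resulting shape (illustrative): {'eth0': ['10.155.92.191']}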
- return addresses
-
- @staticmethod
- def _has_all_ipv4_addresses(addresses):
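-        # Illustrative: {'eth0': ['10.0.3.5']} -> True;
-        # {'eth0': []} -> False; {} -> False.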
- return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
-
- def _get_addresses(self):
- try:
- due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
- while datetime.datetime.now() < due:
- time.sleep(1)
- addresses = self._instance_ipv4_addresses()
- if self._has_all_ipv4_addresses(addresses):
- self.addresses = addresses
- return
- except LXDClientException as e:
- e.msg = 'timeout for getting IPv4 addresses'
- raise
-
- def _started(self):
- if self.old_state == 'absent':
- self._create_instance()
- self._start_instance()
- else:
- if self.old_state == 'frozen':
- self._unfreeze_instance()
- elif self.old_state == 'stopped':
- self._start_instance()
- if self._needs_to_apply_instance_configs():
- self._apply_instance_configs()
- if self.wait_for_ipv4_addresses:
- self._get_addresses()
-
- def _stopped(self):
- if self.old_state == 'absent':
- self._create_instance()
- else:
- if self.old_state == 'stopped':
- if self._needs_to_apply_instance_configs():
- self._start_instance()
- self._apply_instance_configs()
- self._stop_instance()
- else:
- if self.old_state == 'frozen':
- self._unfreeze_instance()
- if self._needs_to_apply_instance_configs():
- self._apply_instance_configs()
- self._stop_instance()
-
- def _restarted(self):
- if self.old_state == 'absent':
- self._create_instance()
- self._start_instance()
- else:
- if self.old_state == 'frozen':
- self._unfreeze_instance()
- if self._needs_to_apply_instance_configs():
- self._apply_instance_configs()
- self._restart_instance()
- if self.wait_for_ipv4_addresses:
- self._get_addresses()
-
- def _destroyed(self):
- if self.old_state != 'absent':
- if self.old_state == 'frozen':
- self._unfreeze_instance()
- if self.old_state != 'stopped':
- self._stop_instance()
- self._delete_instance()
-
- def _frozen(self):
- if self.old_state == 'absent':
- self._create_instance()
- self._start_instance()
- self._freeze_instance()
- else:
- if self.old_state == 'stopped':
- self._start_instance()
- if self._needs_to_apply_instance_configs():
- self._apply_instance_configs()
- self._freeze_instance()
-
- def _needs_to_change_instance_config(self, key):
- if key not in self.config:
- return False
- if key == 'config' and self.ignore_volatile_options: # the old behavior is to ignore configurations by keyword "volatile"
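-            # e.g. 'volatile.eth0.hwaddr' style keys (illustrative) are
-            # dropped from the comparison below.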
- old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items() if not k.startswith('volatile.'))
- for k, v in self.config['config'].items():
- if k not in old_configs:
- return True
- if old_configs[k] != v:
- return True
- return False
- elif key == 'config': # next default behavior
- old_configs = dict((k, v) for k, v in self.old_instance_json['metadata'][key].items())
- for k, v in self.config['config'].items():
- if k not in old_configs:
- return True
- if old_configs[k] != v:
- return True
- return False
- else:
- old_configs = self.old_instance_json['metadata'][key]
- return self.config[key] != old_configs
-
- def _needs_to_apply_instance_configs(self):
- return (
- self._needs_to_change_instance_config('architecture') or
- self._needs_to_change_instance_config('config') or
- self._needs_to_change_instance_config('ephemeral') or
- self._needs_to_change_instance_config('devices') or
- self._needs_to_change_instance_config('profiles')
- )
-
- def _apply_instance_configs(self):
- old_metadata = self.old_instance_json['metadata']
- body_json = {
- 'architecture': old_metadata['architecture'],
- 'config': old_metadata['config'],
- 'devices': old_metadata['devices'],
- 'profiles': old_metadata['profiles']
- }
-
- if self._needs_to_change_instance_config('architecture'):
- body_json['architecture'] = self.config['architecture']
- if self._needs_to_change_instance_config('config'):
- for k, v in self.config['config'].items():
- body_json['config'][k] = v
- if self._needs_to_change_instance_config('ephemeral'):
- body_json['ephemeral'] = self.config['ephemeral']
- if self._needs_to_change_instance_config('devices'):
- body_json['devices'] = self.config['devices']
- if self._needs_to_change_instance_config('profiles'):
- body_json['profiles'] = self.config['profiles']
-
- self.client.do('PUT', '{0}/{1}'.format(self.api_endpoint, self.name), body_json=body_json)
- self.actions.append('apply_instance_configs')
-
- def run(self):
- """Run the main method."""
-
- try:
- if self.trust_password is not None:
- self.client.authenticate(self.trust_password)
- self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
-
- self.old_instance_json = self._get_instance_json()
- self.old_state = self._instance_json_to_module_state(self.old_instance_json)
- action = getattr(self, LXD_ANSIBLE_STATES[self.state])
- action()
-
- state_changed = len(self.actions) > 0
- result_json = {
- 'log_verbosity': self.module._verbosity,
- 'changed': state_changed,
- 'old_state': self.old_state,
- 'actions': self.actions
- }
- if self.client.debug:
- result_json['logs'] = self.client.logs
- if self.addresses is not None:
- result_json['addresses'] = self.addresses
- self.module.exit_json(**result_json)
- except LXDClientException as e:
- state_changed = len(self.actions) > 0
- fail_params = {
- 'msg': e.msg,
- 'changed': state_changed,
- 'actions': self.actions
- }
- if self.client.debug:
- fail_params['logs'] = e.kwargs['logs']
- self.module.fail_json(**fail_params)
-
-
-def main():
- """Ansible Main module."""
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(
- type='str',
- required=True
- ),
- architecture=dict(
- type='str',
- ),
- config=dict(
- type='dict',
- ),
- ignore_volatile_options=dict(
- type='bool',
- ),
- devices=dict(
- type='dict',
- ),
- ephemeral=dict(
- type='bool',
- ),
- profiles=dict(
- type='list',
- elements='str',
- ),
- source=dict(
- type='dict',
- ),
- state=dict(
- choices=list(LXD_ANSIBLE_STATES.keys()),
- default='started'
- ),
- target=dict(
- type='str',
- ),
- timeout=dict(
- type='int',
- default=30
- ),
- type=dict(
- type='str',
- default='container',
- choices=['container', 'virtual-machine'],
- ),
- wait_for_container=dict(
- type='bool',
- default=False
- ),
- wait_for_ipv4_addresses=dict(
- type='bool',
- default=False
- ),
- force_stop=dict(
- type='bool',
- default=False
- ),
- url=dict(
- type='str',
- default=ANSIBLE_LXD_DEFAULT_URL
- ),
- snap_url=dict(
- type='str',
- default='unix:/var/snap/lxd/common/lxd/unix.socket'
- ),
- client_key=dict(
- type='path',
- aliases=['key_file']
- ),
- client_cert=dict(
- type='path',
- aliases=['cert_file']
- ),
- trust_password=dict(type='str', no_log=True)
- ),
- supports_check_mode=False,
- )
-
- if module.params['ignore_volatile_options'] is None:
- module.params['ignore_volatile_options'] = True
- module.deprecate(
-            'If the keyword "volatile" is used in a playbook in the config '
-            'section, a "changed" message will appear with every run, even without a change '
-            'to the playbook. '
-            'This will change in the future. Please test your scripts '
-            'with "ignore_volatile_options: false". To keep the old behavior, set that option explicitly to "true".',
- version='6.0.0', collection_name='community.general')
-
- lxd_manage = LXDContainerManagement(module=module)
- lxd_manage.run()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py b/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
deleted file mode 100644
index 3094898f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/lxd/lxd_profile.py
+++ /dev/null
@@ -1,518 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Hiroaki Nakamura
-# Copyright: (c) 2020, Frank Dornheim
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: lxd_profile
-short_description: Manage LXD profiles
-description:
- - Management of LXD profiles
-author: "Hiroaki Nakamura (@hnakamur)"
-options:
- name:
- description:
- - Name of a profile.
- required: true
- type: str
- description:
- description:
- - Description of the profile.
- type: str
- config:
- description:
- - 'The config for the container (e.g. {"limits.memory": "4GB"}).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
- - If the profile already exists and its "config" value in metadata
- obtained from
- GET /1.0/profiles/
- U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
-        is different, this module tries to apply the configuration.
-      - Not all config values can be applied to an existing profile;
-        you may need to delete and recreate the profile.
- required: false
- type: dict
- devices:
- description:
- - 'The devices for the profile
- (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
- required: false
- type: dict
- new_name:
- description:
- - A new name of a profile.
-      - If this parameter is specified, the profile will be renamed to this name.
- See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
- required: false
- type: str
- merge_profile:
- description:
- - Merge the configuration of the present profile with the new desired configuration,
- instead of replacing it.
- required: false
- default: false
- type: bool
- version_added: 2.1.0
- state:
- choices:
- - present
- - absent
- description:
- - Define the state of a profile.
- required: false
- default: present
- type: str
- url:
- description:
- - The unix domain socket path or the https URL for the LXD server.
- required: false
- default: unix:/var/lib/lxd/unix.socket
- type: str
- snap_url:
- description:
- - The unix domain socket path when LXD is installed by snap package manager.
- required: false
- default: unix:/var/snap/lxd/common/lxd/unix.socket
- type: str
- client_key:
- description:
- - The client certificate key file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.key).
- required: false
- aliases: [ key_file ]
- type: path
- client_cert:
- description:
- - The client certificate file path.
- - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
- required: false
- aliases: [ cert_file ]
- type: path
- trust_password:
- description:
- - The client trusted password.
- - You need to set this password on the LXD server before
-        running this module using the following command:
-        lxc config set core.trust_password
-        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
-      - If trust_password is set, this module sends an authentication
-        request before sending any other requests.
- required: false
- type: str
-notes:
- - Profiles must have a unique name. If you attempt to create a profile
-    with a name that already exists in the user's namespace, the module will
- simply return as "unchanged".
-'''
-
-EXAMPLES = '''
-# An example for creating a profile
-- hosts: localhost
- connection: local
- tasks:
- - name: Create a profile
- community.general.lxd_profile:
- name: macvlan
- state: present
- config: {}
- description: my macvlan profile
- devices:
- eth0:
- nictype: macvlan
- parent: br0
- type: nic
-
-# An example for creating a profile via http connection
-- hosts: localhost
- connection: local
- tasks:
- - name: Create macvlan profile
- community.general.lxd_profile:
- url: https://127.0.0.1:8443
- # These client_cert and client_key values are equal to the default values.
- #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
- #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
- trust_password: mypassword
- name: macvlan
- state: present
- config: {}
- description: my macvlan profile
- devices:
- eth0:
- nictype: macvlan
- parent: br0
- type: nic
-
-# An example for modifying/merging a profile
-- hosts: localhost
- connection: local
- tasks:
- - name: Merge a profile
- community.general.lxd_profile:
- merge_profile: true
- name: macvlan
- state: present
- config: {}
- description: my macvlan profile
- devices:
- eth0:
- nictype: macvlan
- parent: br0
- type: nic
-
-# An example for deleting a profile
-- hosts: localhost
- connection: local
- tasks:
- - name: Delete a profile
- community.general.lxd_profile:
- name: macvlan
- state: absent
-
-# An example for renaming a profile
-- hosts: localhost
- connection: local
- tasks:
- - name: Rename a profile
- community.general.lxd_profile:
- name: macvlan
- new_name: macvlan2
- state: present
-'''
-
-RETURN = '''
-old_state:
- description: The old state of the profile
- returned: success
- type: str
- sample: "absent"
-logs:
- description: The logs of requests and responses.
- returned: when ansible-playbook is invoked with -vvvv.
- type: list
- sample: "(too long to be placed here)"
-actions:
- description: List of actions performed for the profile.
- returned: success
- type: list
- sample: '["create"]'
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
-
-# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
-ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
-
-# PROFILE_STATES is a list for states supported
-PROFILES_STATES = [
- 'present', 'absent'
-]
-
-# CONFIG_PARAMS is a list of config attribute names.
-CONFIG_PARAMS = [
- 'config', 'description', 'devices'
-]
-
-
-class LXDProfileManagement(object):
- def __init__(self, module):
- """Management of LXC containers via Ansible.
-
- :param module: Processed Ansible Module.
- :type module: ``object``
- """
- self.module = module
- self.name = self.module.params['name']
- self._build_config()
- self.state = self.module.params['state']
- self.new_name = self.module.params.get('new_name', None)
-
- self.key_file = self.module.params.get('client_key')
- if self.key_file is None:
- self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
- self.cert_file = self.module.params.get('client_cert')
- if self.cert_file is None:
- self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
- self.debug = self.module._verbosity >= 4
-
- try:
- if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
- self.url = self.module.params['url']
- elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
- self.url = self.module.params['snap_url']
- else:
- self.url = self.module.params['url']
- except Exception as e:
-            self.module.fail_json(msg=str(e))
-
- try:
- self.client = LXDClient(
- self.url, key_file=self.key_file, cert_file=self.cert_file,
- debug=self.debug
- )
- except LXDClientException as e:
- self.module.fail_json(msg=e.msg)
- self.trust_password = self.module.params.get('trust_password', None)
- self.actions = []
-
- def _build_config(self):
- self.config = {}
- for attr in CONFIG_PARAMS:
- param_val = self.module.params.get(attr, None)
- if param_val is not None:
- self.config[attr] = param_val
-
- def _get_profile_json(self):
- return self.client.do(
- 'GET', '/1.0/profiles/{0}'.format(self.name),
- ok_error_codes=[404]
- )
-
- @staticmethod
- def _profile_json_to_module_state(resp_json):
- if resp_json['type'] == 'error':
- return 'absent'
- return 'present'
-
- def _update_profile(self):
- if self.state == 'present':
- if self.old_state == 'absent':
- if self.new_name is None:
- self._create_profile()
- else:
- self.module.fail_json(
- msg='new_name must not be set when the profile does not exist and the state is present',
- changed=False)
- else:
- if self.new_name is not None and self.new_name != self.name:
- self._rename_profile()
- if self._needs_to_apply_profile_configs():
- self._apply_profile_configs()
- elif self.state == 'absent':
- if self.old_state == 'present':
- if self.new_name is None:
- self._delete_profile()
- else:
- self.module.fail_json(
- msg='new_name must not be set when the profile exists and the specified state is absent',
- changed=False)
-
- def _create_profile(self):
- config = self.config.copy()
- config['name'] = self.name
- self.client.do('POST', '/1.0/profiles', config)
- self.actions.append('create')
-
- def _rename_profile(self):
- config = {'name': self.new_name}
- self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
- self.actions.append('rename')
- self.name = self.new_name
-
- def _needs_to_change_profile_config(self, key):
- if key not in self.config:
- return False
- old_configs = self.old_profile_json['metadata'].get(key, None)
- return self.config[key] != old_configs
-
- def _needs_to_apply_profile_configs(self):
- return (
- self._needs_to_change_profile_config('config') or
- self._needs_to_change_profile_config('description') or
- self._needs_to_change_profile_config('devices')
- )
-
- def _merge_dicts(self, source, destination):
- """Merge Dictionarys
-
- Get a list of filehandle numbers from logger to be handed to
- DaemonContext.files_preserve
-
- Args:
- dict(source): source dict
- dict(destination): destination dict
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(destination): merged dict"""
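-        # Illustrative: _merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}})
-        # returns {'a': {'b': 1, 'c': 2}}.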
- for key, value in source.items():
- if isinstance(value, dict):
- # get node or create one
- node = destination.setdefault(key, {})
- self._merge_dicts(value, node)
- else:
- destination[key] = value
- return destination
-
- def _merge_config(self, config):
- """ merge profile
-
-        Merge the configuration of the present profile with the new desired config items
-
- Args:
- dict(config): Dict with the old config in 'metadata' and new config in 'config'
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(config): new config"""
- # merge or copy the sections from the existing profile to 'config'
- for item in ['config', 'description', 'devices', 'name', 'used_by']:
- if item in config:
- config[item] = self._merge_dicts(config['metadata'][item], config[item])
- else:
- config[item] = config['metadata'][item]
- # merge or copy the sections from the ansible-task to 'config'
- return self._merge_dicts(self.config, config)
-
- def _generate_new_config(self, config):
- """ rebuild profile
-
-        Rebuild the profile from the configuration provided in the play.
-        Existing configurations are discarded.
-
-        This is the default behavior.
-
- Args:
- dict(config): Dict with the old config in 'metadata' and new config in 'config'
- Kwargs:
- None
- Raises:
- None
- Returns:
- dict(config): new config"""
- for k, v in self.config.items():
- config[k] = v
- return config
-
- def _apply_profile_configs(self):
- """ Selection of the procedure: rebuild or merge
-
- The standard behavior is that all information not contained
- in the play is discarded.
-
-        If "merge_profile" is provided in the play and set to "true", then existing
-        configurations from the profile and newly defined ones are merged.
-
- Args:
- None
- Kwargs:
- None
- Raises:
- None
- Returns:
- None"""
- config = self.old_profile_json.copy()
- if self.module.params['merge_profile']:
- config = self._merge_config(config)
- else:
- config = self._generate_new_config(config)
-
- # upload config to lxd
- self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
- self.actions.append('apply_profile_configs')
-
- def _delete_profile(self):
- self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
- self.actions.append('delete')
-
- def run(self):
- """Run the main method."""
-
- try:
- if self.trust_password is not None:
- self.client.authenticate(self.trust_password)
-
- self.old_profile_json = self._get_profile_json()
- self.old_state = self._profile_json_to_module_state(self.old_profile_json)
- self._update_profile()
-
- state_changed = len(self.actions) > 0
- result_json = {
- 'changed': state_changed,
- 'old_state': self.old_state,
- 'actions': self.actions
- }
- if self.client.debug:
- result_json['logs'] = self.client.logs
- self.module.exit_json(**result_json)
- except LXDClientException as e:
- state_changed = len(self.actions) > 0
- fail_params = {
- 'msg': e.msg,
- 'changed': state_changed,
- 'actions': self.actions
- }
- if self.client.debug:
- fail_params['logs'] = e.kwargs['logs']
- self.module.fail_json(**fail_params)
-
-
-def main():
- """Ansible Main module."""
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(
- type='str',
- required=True
- ),
- new_name=dict(
- type='str',
- ),
- config=dict(
- type='dict',
- ),
- description=dict(
- type='str',
- ),
- devices=dict(
- type='dict',
- ),
- merge_profile=dict(
- type='bool',
- default=False
- ),
- state=dict(
- choices=PROFILES_STATES,
- default='present'
- ),
- url=dict(
- type='str',
- default=ANSIBLE_LXD_DEFAULT_URL
- ),
- snap_url=dict(
- type='str',
- default='unix:/var/snap/lxd/common/lxd/unix.socket'
- ),
- client_key=dict(
- type='path',
- aliases=['key_file']
- ),
- client_cert=dict(
- type='path',
- aliases=['cert_file']
- ),
- trust_password=dict(type='str', no_log=True)
- ),
- supports_check_mode=False,
- )
-
- lxd_manage = LXDProfileManagement(module=module)
- lxd_manage.run()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
deleted file mode 100644
index 6eefe133..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_dns_reload.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_dns_reload
-author: "Simon Weald (@glitchcrab)"
-short_description: Request reload of Memset's DNS infrastructure
-notes:
- - DNS reload requests are a best-effort service provided by Memset; these generally
- happen every 15 minutes by default, however you can request an immediate reload if
- later tasks rely on the records being created. An API key generated via the
- Memset customer control panel is required with the following minimum scope -
- I(dns.reload). If you wish to poll the job status to wait until the reload has
- completed, then I(job.status) is also required.
-description:
- - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
-options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- poll:
- default: false
- type: bool
- description:
-      - Boolean value; if set, the module will poll the reload job's status and return
- when the job has completed (unless the 30 second timeout is reached first).
- If the timeout is reached then the task will not be marked as failed, but
- stderr will indicate that the polling failed.
-'''
-
-EXAMPLES = '''
-- name: Submit DNS reload and poll
- community.general.memset_dns_reload:
- api_key: 5eb86c9196ab03919abcf03857163741
- poll: True
- delegate_to: localhost
-'''
-
-RETURN = '''
----
-memset_api:
- description: Raw response from the Memset API.
- returned: always
- type: complex
- contains:
- error:
- description: Whether the job ended in error state.
- returned: always
- type: bool
- sample: true
- finished:
- description: Whether the job completed before the result was returned.
- returned: always
- type: bool
- sample: true
- id:
- description: Job ID.
- returned: always
- type: str
- sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
- status:
- description: Job status.
- returned: always
- type: str
- sample: "DONE"
- type:
- description: Job type.
- returned: always
- type: str
- sample: "dns"
-'''
-
-from time import sleep
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def poll_reload_status(api_key=None, job_id=None, payload=None):
- '''
- We poll the `job.status` endpoint every 5 seconds up to a
- maximum of 6 times. This is a relatively arbitrary choice of
- timeout, however requests rarely take longer than 15 seconds
- to complete.
- '''
- memset_api, stderr, msg = None, None, None
- payload['id'] = job_id
-
- api_method = 'job.status'
- _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
-
-    # Poll up to 6 times at 5-second intervals (roughly 30 seconds in total)
-    # instead of looping indefinitely while the job is unfinished.
-    counter = 0
-    while not response.json()['finished'] and counter < 6:
-        sleep(5)
-        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
-        counter += 1
- if response.json()['error']:
- # the reload job was submitted but polling failed. Don't return this as an overall task failure.
- stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
- else:
- memset_api = response.json()
- msg = None
-
-    return memset_api, msg, stderr
-
-
-def reload_dns(args=None):
- '''
- DNS reloads are a single API call and therefore there's not much
- which can go wrong outside of auth errors.
- '''
- retvals, payload = dict(), dict()
- has_changed, has_failed = False, False
- memset_api, msg, stderr = None, None, None
-
- api_method = 'dns.reload'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- if has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = has_failed
- retvals['memset_api'] = response.json()
- retvals['msg'] = msg
-        return retvals
-
- # set changed to true if the reload request was accepted.
- has_changed = True
- memset_api = msg
- # empty msg var as we don't want to return the API's json response twice.
- msg = None
-
- if args['poll']:
- # hand off to the poll function.
- job_id = response.json()['id']
- memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
-
- # assemble return variables.
- retvals['failed'] = has_failed
- retvals['changed'] = has_changed
-    # Avoid eval(): copy each non-None local into the return dict explicitly.
-    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
-        if value is not None:
-            retvals[key] = value
-
-    return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- api_key=dict(required=True, type='str', no_log=True),
- poll=dict(required=False, default=False, type='bool')
- ),
- supports_check_mode=False
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
-
- retvals = reload_dns(args)
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
deleted file mode 100644
index e880b460..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_memstore_info.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_memstore_info
-author: "Simon Weald (@glitchcrab)"
-short_description: Retrieve Memstore product usage information.
-notes:
- - An API key generated via the Memset customer control panel is needed with the
- following minimum scope - I(memstore.usage).
-description:
- - Retrieve Memstore product usage information.
- - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
-options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- name:
- required: true
- type: str
- description:
-      - The Memstore product name (for example, C(mstestyaa1)).
-'''
-
-EXAMPLES = '''
-- name: Get usage for mstestyaa1
- community.general.memset_memstore_info:
- name: mstestyaa1
- api_key: 5eb86c9896ab03919abcf03857163741
- delegate_to: localhost
-'''
-
-RETURN = '''
----
-memset_api:
- description: Info from the Memset API
- returned: always
- type: complex
- contains:
- cdn_bandwidth:
- description: Dictionary of CDN bandwidth facts
- returned: always
- type: complex
- contains:
- bytes_out:
- description: Outbound CDN bandwidth for the last 24 hours in bytes
- returned: always
- type: int
- sample: 1000
- requests:
- description: Number of requests in the last 24 hours
- returned: always
- type: int
- sample: 10
- bytes_in:
- description: Inbound CDN bandwidth for the last 24 hours in bytes
- returned: always
- type: int
- sample: 1000
- containers:
- description: Number of containers
- returned: always
- type: int
- sample: 10
- bytes:
- description: Space used in bytes
- returned: always
- type: int
- sample: 3860997965
- objs:
- description: Number of objects
- returned: always
- type: int
- sample: 1000
- bandwidth:
- description: Dictionary of CDN bandwidth facts
- returned: always
- type: complex
- contains:
- bytes_out:
- description: Outbound bandwidth for the last 24 hours in bytes
- returned: always
- type: int
- sample: 1000
- requests:
- description: Number of requests in the last 24 hours
- returned: always
- type: int
- sample: 10
- bytes_in:
- description: Inbound bandwidth for the last 24 hours in bytes
- returned: always
- type: int
- sample: 1000
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def get_facts(args=None):
- '''
- Performs a simple API call and returns a JSON blob.
- '''
- retvals, payload = dict(), dict()
- has_changed, has_failed = False, False
- msg, stderr, memset_api = None, None, None
-
- payload['name'] = args['name']
-
- api_method = 'memstore.usage'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
-
- if has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = has_failed
- retvals['msg'] = msg
-        retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
-        return retvals
-
- # we don't want to return the same thing twice
- msg = None
- memset_api = response.json()
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
-    # Avoid eval(): copy each non-None local into the return dict explicitly.
-    for key, value in (('msg', msg), ('memset_api', memset_api)):
-        if value is not None:
-            retvals[key] = value
-
-    return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- api_key=dict(required=True, type='str', no_log=True),
- name=dict(required=True, type='str')
- ),
- supports_check_mode=True,
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
-
- retvals = get_facts(args)
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
deleted file mode 100644
index 853e2c88..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_server_info.py
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_server_info
-author: "Simon Weald (@glitchcrab)"
-short_description: Retrieve server information.
-notes:
- - An API key generated via the Memset customer control panel is needed with the
- following minimum scope - I(server.info).
-description:
- - Retrieve server information.
- - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
-options:
- api_key:
- required: true
- type: str
- description:
- - The API key obtained from the Memset control panel.
- name:
- required: true
- type: str
- description:
- - The server product name (for example C(testyaa1)).
-'''
-
-EXAMPLES = '''
-- name: Get details for testyaa1
- community.general.memset_server_info:
- name: testyaa1
- api_key: 5eb86c9896ab03919abcf03857163741
- delegate_to: localhost
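-
-# A hypothetical follow-up (the register/debug names are illustrative):
-# capture the module output and inspect a returned fact such as the primary IP.
-- name: Get details for testyaa1 and inspect the result
- community.general.memset_server_info:
- name: testyaa1
- api_key: 5eb86c9896ab03919abcf03857163741
- delegate_to: localhost
- register: server_details
-
-- name: Show the server's primary IP
- ansible.builtin.debug:
- var: server_details.memset_api.primary_ip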
-'''
-
-RETURN = '''
----
-memset_api:
- description: Info from the Memset API
- returned: always
- type: complex
- contains:
- backups:
- description: Whether this server has a backup service.
- returned: always
- type: bool
- sample: true
- control_panel:
- description: Whether the server has a control panel (for example cPanel).
- returned: always
- type: str
- sample: 'cpanel'
- data_zone:
- description: The data zone the server is in.
- returned: always
- type: str
- sample: 'Memset Public Cloud'
- expiry_date:
- description: Current expiry date of the server.
- returned: always
- type: str
- sample: '2018-08-10'
- firewall_rule_group:
- description: Details about the firewall group this server is in.
- returned: always
- type: dict
- sample: {
- "default_outbound_policy": "RETURN",
- "name": "testyaa-fw1",
- "nickname": "testyaa cPanel rules",
- "notes": "",
- "public": false,
- "rules": {
- "51d7db54d39c3544ef7c48baa0b9944f": {
- "action": "ACCEPT",
- "comment": "",
- "dest_ip6s": "any",
- "dest_ips": "any",
- "dest_ports": "any",
- "direction": "Inbound",
- "ip_version": "any",
- "ordering": 2,
- "protocols": "icmp",
- "rule_group_name": "testyaa-fw1",
- "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
- "source_ip6s": "any",
- "source_ips": "any",
- "source_ports": "any"
- }
- }
- }
- firewall_type:
- description: The type of firewall the server has (for example self-managed, managed).
- returned: always
- type: str
- sample: 'managed'
- host_name:
- description: The server's hostname.
- returned: always
- type: str
- sample: 'testyaa1.miniserver.com'
- ignore_monitoring_off:
- description: When true, Memset won't remind the customer that monitoring is disabled.
- returned: always
- type: bool
- sample: true
- ips:
- description: List of dictionaries of all IP addresses assigned to the server.
- returned: always
- type: list
- sample: [
- {
- "address": "1.2.3.4",
- "bytes_in_today": 1000.0,
- "bytes_in_yesterday": 2000.0,
- "bytes_out_today": 1000.0,
- "bytes_out_yesterday": 2000.0
- }
- ]
- monitor:
- description: Whether the server has monitoring enabled.
- returned: always
- type: bool
- sample: true
- monitoring_level:
- description: The server's monitoring level (for example basic).
- returned: always
- type: str
- sample: 'basic'
- name:
- description: Server name (same as the service name).
- returned: always
- type: str
- sample: 'testyaa1'
- network_zones:
- description: The network zone(s) the server is in.
- returned: always
- type: list
- sample: [ 'reading' ]
- nickname:
- description: Customer-set nickname for the server.
- returned: always
- type: str
- sample: 'database server'
- no_auto_reboot:
- description: Whether or not to reboot the server if monitoring detects it is down.
- returned: always
- type: bool
- sample: true
- no_nrpe:
- description: Whether Memset should use NRPE to monitor this server.
- returned: always
- type: bool
- sample: true
- os:
- description: The server's Operating System.
- returned: always
- type: str
- sample: 'debian_stretch_64'
- penetration_patrol:
- description: Intrusion detection support level for this server.
- returned: always
- type: str
- sample: 'managed'
- penetration_patrol_alert_level:
- description: The alert level at which notifications are sent.
- returned: always
- type: int
- sample: 10
- primary_ip:
- description: Server's primary IP.
- returned: always
- type: str
- sample: '1.2.3.4'
- renewal_price_amount:
- description: Renewal cost for the server.
- returned: always
- type: str
- sample: '30.00'
- renewal_price_currency:
- description: Currency for renewal payments.
- returned: always
- type: str
- sample: 'GBP'
- renewal_price_vat:
- description: VAT rate for renewal payments.
- returned: always
- type: str
- sample: '20'
- start_date:
- description: Server's start date.
- returned: always
- type: str
- sample: '2013-04-10'
- status:
- description: Current status of the server (for example live, onhold).
- returned: always
- type: str
- sample: 'LIVE'
- support_level:
- description: Support level included with the server.
- returned: always
- type: str
- sample: 'managed'
- type:
- description: What this server is (for example dedicated).
- returned: always
- type: str
- sample: 'miniserver'
- vlans:
- description: Dictionary of tagged and untagged VLANs this server is in.
- returned: always
- type: dict
- sample: {
- tagged: [],
- untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
- }
- vulnscan:
- description: Vulnerability scanning level.
- returned: always
- type: str
- sample: 'basic'
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def get_facts(args=None):
- '''
- Performs a simple API call and returns a JSON blob.
- '''
- retvals, payload = dict(), dict()
- has_changed, has_failed = False, False
- msg, stderr, memset_api = None, None, None
-
- payload['name'] = args['name']
-
- api_method = 'server.info'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
-
- if has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = has_failed
- retvals['msg'] = msg
- retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
- return(retvals)
-
- # we don't want to return the same thing twice
- msg = None
- memset_api = response.json()
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
- for key, value in (('msg', msg), ('memset_api', memset_api)):
- if value is not None:
- retvals[key] = value
-
- return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- api_key=dict(required=True, type='str', no_log=True),
- name=dict(required=True, type='str')
- ),
- supports_check_mode=True,
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
-
- retvals = get_facts(args)
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
deleted file mode 100644
index 9ef798bd..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_zone
-author: "Simon Weald (@glitchcrab)"
-short_description: Creates and deletes Memset DNS zones.
-notes:
- - Zones can be thought of as a logical group of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
-description:
- - Manage DNS zones in a Memset account.
-options:
- state:
- required: true
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- name:
- required: true
- description:
- - The zone nickname; usually the same as the main domain. Ensure this
- value has at most 250 characters.
- type: str
- aliases: [ nickname ]
- ttl:
- description:
- - The default TTL for all records created in the zone. This must be a
- valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
- type: int
- choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
- force:
- required: false
- default: false
- type: bool
- description:
- - Forces deletion of a zone and all zone domains/zone records it contains.
-'''
-
-EXAMPLES = '''
-# Create the zone 'test'
-- name: Create zone
- community.general.memset_zone:
- name: test
- state: present
- api_key: 5eb86c9196ab03919abcf03857163741
- ttl: 300
- delegate_to: localhost
-
-# Force zone deletion
-- name: Force delete zone
- community.general.memset_zone:
- name: test
- state: absent
- api_key: 5eb86c9196ab03919abcf03857163741
- force: true
- delegate_to: localhost
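-
-# A hypothetical dry run: the module supports check mode, so 'changed'
-# reports whether the zone would be created without touching anything.
-- name: Preview zone creation in check mode
- community.general.memset_zone:
- name: test
- state: present
- api_key: 5eb86c9196ab03919abcf03857163741
- ttl: 300
- delegate_to: localhost
- check_mode: true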
-'''
-
-RETURN = '''
-memset_api:
- description: Zone info from the Memset API
- returned: when state == present
- type: complex
- contains:
- domains:
- description: List of domains in this zone
- returned: always
- type: list
- sample: []
- id:
- description: Zone ID
- returned: always
- type: str
- sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
- nickname:
- description: Zone name
- returned: always
- type: str
- sample: "example.com"
- records:
- description: List of DNS records for domains in this zone
- returned: always
- type: list
- sample: []
- ttl:
- description: Default TTL for domains in this zone
- returned: always
- type: int
- sample: 300
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import check_zone
-from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def api_validation(args=None):
- '''
- Perform some validation which will be enforced by Memset's API (see:
- https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
- '''
- # zone name must be at most 250 chars.
- if len(args['name']) > 250:
- stderr = 'Zone name must not exceed 250 characters in length.'
- module.fail_json(failed=True, msg=stderr, stderr=stderr)
-
-
-def check(args=None):
- '''
- Support for running with check mode.
- '''
- retvals = dict()
-
- api_method = 'dns.zone_list'
- has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- zone_exists, counter = check_zone(data=response, name=args['name'])
-
- # set changed to true if the operation would cause a change.
- has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
-
- return retvals
-
-
-def create_zone(args=None, zone_exists=None, payload=None):
- '''
- At this point we already know whether the zone exists, so we
- just need to make the API reflect the desired state.
- '''
- has_changed, has_failed = False, False
- msg, memset_api = None, None
-
- if not zone_exists:
- payload['ttl'] = args['ttl']
- payload['nickname'] = args['name']
- api_method = 'dns.zone_create'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- else:
- api_method = 'dns.zone_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
- for zone in response.json():
- if zone['nickname'] == args['name']:
- break
- if zone['ttl'] != args['ttl']:
- # update the zone if the desired TTL is different.
- payload['id'] = zone['id']
- payload['ttl'] = args['ttl']
- api_method = 'dns.zone_update'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
-
- # populate return var with zone info.
- api_method = 'dns.zone_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
-
- if zone_exists:
- payload = dict()
- payload['id'] = zone_id
- api_method = 'dns.zone_info'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- memset_api = response.json()
-
- return has_failed, has_changed, memset_api, msg
-
-
-def delete_zone(args=None, zone_exists=None, payload=None):
- '''
- Deletion requires extra sanity checking as the zone cannot be
- deleted if it contains domains or records. Setting force=true
- will override this behaviour.
- '''
- has_changed, has_failed = False, False
- msg, memset_api = None, None
-
- if zone_exists:
- api_method = 'dns.zone_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- counter = 0
- for zone in response.json():
- if zone['nickname'] == args['name']:
- counter += 1
- if counter == 1:
- for zone in response.json():
- if zone['nickname'] == args['name']:
- zone_id = zone['id']
- domain_count = len(zone['domains'])
- record_count = len(zone['records'])
- if (domain_count > 0 or record_count > 0) and args['force'] is False:
- # we need to fail out if force was not explicitly set.
- stderr = 'Zone contains domains or records and force was not used.'
- has_failed = True
- has_changed = False
- module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
- api_method = 'dns.zone_delete'
- payload['id'] = zone_id
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
- memset_api = msg
- msg = None
- else:
- # zone names are not unique, so we cannot safely delete the requested
- # zone at this time.
- has_failed = True
- has_changed = False
- msg = 'Unable to delete zone as multiple zones with the same name exist.'
- else:
- has_failed, has_changed = False, False
-
- return has_failed, has_changed, memset_api, msg
-
-
-def create_or_delete(args=None):
- '''
- We need to perform some initial sanity checking and also look
- up required info before handing it off to create or delete.
- '''
- retvals, payload = dict(), dict()
- has_failed, has_changed = False, False
- msg, memset_api, stderr = None, None, None
-
- # get the zones and check if the relevant zone exists.
- api_method = 'dns.zone_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
- if _has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = _has_failed
- retvals['msg'] = _msg
-
- return retvals
-
- zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
-
- if args['state'] == 'present':
- has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
-
- elif args['state'] == 'absent':
- has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
-
- retvals['failed'] = has_failed
- retvals['changed'] = has_changed
- for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
- if value is not None:
- retvals[key] = value
-
- return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(required=True, choices=['present', 'absent'], type='str'),
- api_key=dict(required=True, type='str', no_log=True),
- name=dict(required=True, aliases=['nickname'], type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- force=dict(required=False, default=False, type='bool')
- ),
- supports_check_mode=True
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
- args['check_mode'] = module.check_mode
-
- # validate some API-specific limitations.
- api_validation(args=args)
-
- if module.check_mode:
- retvals = check(args)
- else:
- retvals = create_or_delete(args)
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
deleted file mode 100644
index 4aa0eada..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_domain.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_zone_domain
-author: "Simon Weald (@glitchcrab)"
-short_description: Create and delete domains in Memset DNS zones.
-notes:
- - Zone domains can be thought of as a collection of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
- - Currently this module can only create one domain at a time. Multiple domains should
- be created using C(with_items).
-description:
- - Manage DNS zone domains in a Memset account.
-options:
- state:
- default: present
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- domain:
- required: true
- description:
- - The zone domain name. Ensure this value has at most 250 characters.
- type: str
- aliases: ['name']
- zone:
- required: true
- description:
- - The zone to add the domain to (this must already exist).
- type: str
-'''
-
-EXAMPLES = '''
-# Create the zone domain 'test.com'
-- name: Create zone domain
- community.general.memset_zone_domain:
- domain: test.com
- zone: testzone
- state: present
- api_key: 5eb86c9196ab03919abcf03857163741
- delegate_to: localhost
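-
-# As the notes mention, only one domain is created per invocation; a loop
-# such as with_items covers several (the domain names here are illustrative).
-- name: Create multiple zone domains
- community.general.memset_zone_domain:
- domain: "{{ item }}"
- zone: testzone
- state: present
- api_key: 5eb86c9196ab03919abcf03857163741
- delegate_to: localhost
- with_items:
- - test1.com
- - test2.com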
-'''
-
-RETURN = '''
-memset_api:
- description: Domain info from the Memset API
- returned: when changed or state == present
- type: complex
- contains:
- domain:
- description: Domain name
- returned: always
- type: str
- sample: "example.com"
- id:
- description: Domain ID
- returned: always
- type: str
- sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
-from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def api_validation(args=None):
- '''
- Perform some validation which will be enforced by Memset's API (see:
- https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
- '''
- # zone domain length must be at most 250 chars.
- if len(args['domain']) > 250:
- stderr = 'Zone domain must not exceed 250 characters in length.'
- module.fail_json(failed=True, msg=stderr)
-
-
-def check(args=None):
- '''
- Support for running with check mode.
- '''
- retvals = dict()
- has_changed = False
-
- api_method = 'dns.zone_domain_list'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- domain_exists = check_zone_domain(data=response, domain=args['domain'])
-
- # set changed to true if the operation would cause a change.
- has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
-
- return retvals
-
-
-def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
- '''
- At this point we already know whether the containing zone exists,
- so we just need to create the domain (or exit if it already exists).
- '''
- has_changed, has_failed = False, False
- msg = None
-
- api_method = 'dns.zone_domain_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- for zone_domain in response.json():
- if zone_domain['domain'] == args['domain']:
- # zone domain already exists, nothing to change.
- has_changed = False
- break
- else:
- # we need to create the domain
- api_method = 'dns.zone_domain_create'
- payload['domain'] = args['domain']
- payload['zone_id'] = zone_id
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
-
- return has_failed, has_changed, msg
-
-
-def delete_zone_domain(args=None, payload=None):
- '''
- Deletion is pretty simple, domains are always unique so we
- don't need to do any sanity checking to avoid deleting the
- wrong thing.
- '''
- has_changed, has_failed = False, False
- msg, memset_api = None, None
-
- api_method = 'dns.zone_domain_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- domain_exists = check_zone_domain(data=response, domain=args['domain'])
-
- if domain_exists:
- api_method = 'dns.zone_domain_delete'
- payload['domain'] = args['domain']
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- memset_api = response.json()
- # unset msg as we don't want to return unnecessary info to the user.
- msg = None
-
- return has_failed, has_changed, memset_api, msg
-
-
-def create_or_delete_domain(args=None):
- '''
- We need to perform some initial sanity checking and also look
- up required info before handing it off to create or delete.
- '''
- retvals, payload = dict(), dict()
- has_changed, has_failed = False, False
- msg, stderr, memset_api = None, None, None
-
- # get the zones and check if the relevant zone exists.
- api_method = 'dns.zone_list'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- if has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = has_failed
- retvals['msg'] = msg
- retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
- return(retvals)
-
- zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
-
- if not zone_exists:
- # the zone needs to be unique - this isn't a requirement of Memset's API but it
- # makes sense in the context of this module.
- has_failed = True
- if counter == 0:
- stderr = "DNS zone '{0}' does not exist, cannot create domain." . format(args['zone'])
- elif counter > 1:
- stderr = "{0} matches multiple zones, cannot create domain." . format(args['zone'])
-
- retvals['failed'] = has_failed
- retvals['msg'] = stderr
- return(retvals)
-
- if args['state'] == 'present':
- has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
-
- if args['state'] == 'absent':
- has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
- for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
- if value is not None:
- retvals[key] = value
-
- return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(default='present', choices=['present', 'absent'], type='str'),
- api_key=dict(required=True, type='str', no_log=True),
- domain=dict(required=True, aliases=['name'], type='str'),
- zone=dict(required=True, type='str')
- ),
- supports_check_mode=True
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
- args['check_mode'] = module.check_mode
-
- # validate some API-specific limitations.
- api_validation(args=args)
-
- if module.check_mode:
- retvals = check(args)
- else:
- retvals = create_or_delete_domain(args)
-
- # we would need to populate the return values with the API's response
- # in several places so it's easier to do it at the end instead.
- if not retvals['failed']:
- if args['state'] == 'present' and not module.check_mode:
- payload = dict()
- payload['domain'] = args['domain']
- api_method = 'dns.zone_domain_info'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- retvals['memset_api'] = response.json()
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
deleted file mode 100644
index 981d2ac4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/memset/memset_zone_record.py
+++ /dev/null
@@ -1,380 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018, Simon Weald
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: memset_zone_record
-author: "Simon Weald (@glitchcrab)"
-short_description: Create and delete records in Memset DNS zones.
-notes:
- - Zones can be thought of as a logical group of domains, all of which share the
- same DNS records (i.e. they point to the same IP). An API key generated via the
- Memset customer control panel is needed with the following minimum scope -
- I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
- - Currently this module can only create one DNS record at a time. Multiple records
- should be created using C(with_items).
-description:
- - Manage DNS records in a Memset account.
-options:
- state:
- default: present
- description:
- - Indicates desired state of resource.
- type: str
- choices: [ absent, present ]
- api_key:
- required: true
- description:
- - The API key obtained from the Memset control panel.
- type: str
- address:
- required: true
- description:
- - The address for this record (can be IP or text string depending on record type).
- type: str
- aliases: [ ip, data ]
- priority:
- description:
- - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
- type: int
- record:
- required: false
- description:
- - The subdomain to create.
- type: str
- type:
- required: true
- description:
- - The type of DNS record to create.
- choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
- type: str
- relative:
- type: bool
- default: false
- description:
- - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
- and C(SRV) record types.
- ttl:
- description:
- - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
- valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
- choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
- type: int
- zone:
- required: true
- description:
- - The name of the zone to which the record will be added.
- type: str
-'''
-
-EXAMPLES = '''
-# Create DNS record for www.domain.com
-- name: Create DNS record
- community.general.memset_zone_record:
- api_key: dcf089a2896940da9ffefb307ef49ccd
- state: present
- zone: domain.com
- type: A
- record: www
- address: 1.2.3.4
- ttl: 300
- relative: false
- delegate_to: localhost
-
-# create an SPF record for domain.com
-- name: Create SPF record for domain.com
- community.general.memset_zone_record:
- api_key: dcf089a2896940da9ffefb307ef49ccd
- state: present
- zone: domain.com
- type: TXT
- address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
- delegate_to: localhost
-
-# create multiple DNS records
-- name: Create multiple DNS records
- community.general.memset_zone_record:
- api_key: dcf089a2896940da9ffefb307ef49ccd
- zone: "{{ item.zone }}"
- type: "{{ item.type }}"
- record: "{{ item.record }}"
- address: "{{ item.address }}"
- delegate_to: localhost
- with_items:
- - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
- - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
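-
-# A hypothetical example of the relative option (zone and record values are
-# illustrative): the current domain is appended to the address for CNAME,
-# MX, NS and SRV record types.
-- name: Create relative CNAME record
- community.general.memset_zone_record:
- api_key: dcf089a2896940da9ffefb307ef49ccd
- state: present
- zone: domain.com
- type: CNAME
- record: blog
- address: www
- relative: true
- delegate_to: localhost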
-'''
-
-RETURN = '''
-memset_api:
- description: Record info from the Memset API.
- returned: when state == present
- type: complex
- contains:
- address:
- description: Record content (may be an IP, string or blank depending on record type).
- returned: always
- type: str
- sample: 1.1.1.1
- id:
- description: Record ID.
- returned: always
- type: str
- sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
- priority:
- description: Priority for C(MX) and C(SRV) records.
- returned: always
- type: int
- sample: 10
- record:
- description: Name of record.
- returned: always
- type: str
- sample: "www"
- relative:
- description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
- returned: always
- type: bool
- sample: False
- ttl:
- description: Record TTL.
- returned: always
- type: int
- sample: 10
- type:
- description: Record type.
- returned: always
- type: str
- sample: AAAA
- zone_id:
- description: Zone ID.
- returned: always
- type: str
- sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
-from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
-
-
-def api_validation(args=None):
- '''
- Perform some validation which will be enforced by Memset's API (see:
- https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
- '''
- failed_validation = False
-
- # priority can only be an integer in the range 0 to 999.
- if not 0 <= args['priority'] <= 999:
- failed_validation = True
- error = 'Priority must be in the range 0 to 999 (inclusive).'
- # address value must be at most 250 chars.
- if len(args['address']) > 250:
- failed_validation = True
- error = "Address must not exceed 250 characters in length."
- # record value must be at most 63 chars.
- if args['record']:
- if len(args['record']) > 63:
- failed_validation = True
- error = "Record must not exceed 63 characters in length."
- # relative isn't used for all record types
- if args['relative']:
- if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
- failed_validation = True
- error = "Relative is only valid for CNAME, MX, NS and SRV record types."
- # if any of the above failed then fail early
- if failed_validation:
- module.fail_json(failed=True, msg=error)
-
-
-def create_zone_record(args=None, zone_id=None, records=None, payload=None):
- '''
- Sanity checking has already occurred prior to this function being
- called, so we can go ahead and either create or update the record.
- As defaults are defined for all values in the argument_spec, this
- may cause some changes to occur as the defaults are enforced (if
- the user has only configured required variables).
- '''
- has_changed, has_failed = False, False
- msg, memset_api = None, None
-
- # assemble the new record.
- new_record = dict()
- new_record['zone_id'] = zone_id
- for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
- new_record[arg] = args[arg]
-
- # if we have any matches, update them.
- if records:
- for zone_record in records:
- # record exists, add ID to payload.
- new_record['id'] = zone_record['id']
- if zone_record == new_record:
- # nothing to do; record is already correct so we populate
- # the return var with the existing record's details.
- memset_api = zone_record
- return has_changed, has_failed, memset_api, msg
- else:
- # merge dicts ensuring we change any updated values
- payload = zone_record.copy()
- payload.update(new_record)
- api_method = 'dns.zone_record_update'
- if args['check_mode']:
- has_changed = True
- # return the new record to the user in the returned var.
- memset_api = new_record
- return has_changed, has_failed, memset_api, msg
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- memset_api = new_record
- # empty msg as we don't want to return a boatload of json to the user.
- msg = None
- else:
- # no record found, so we need to create it
- api_method = 'dns.zone_record_create'
- payload = new_record
- if args['check_mode']:
- has_changed = True
- # populate the return var with the new record's details.
- memset_api = new_record
- return has_changed, has_failed, memset_api, msg
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- memset_api = new_record
- # empty msg as we don't want to return a boatload of json to the user.
- msg = None
-
- return has_changed, has_failed, memset_api, msg
-
-
-def delete_zone_record(args=None, records=None, payload=None):
- '''
- Matching records can be cleanly deleted without affecting other
- resource types, so this is pretty simple to achieve.
- '''
- has_changed, has_failed = False, False
- msg, memset_api = None, None
-
- # if we have any matches, delete them.
- if records:
- for zone_record in records:
- if args['check_mode']:
- has_changed = True
- return has_changed, has_failed, memset_api, msg
- payload['id'] = zone_record['id']
- api_method = 'dns.zone_record_delete'
- has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
- if not has_failed:
- has_changed = True
- memset_api = zone_record
- # empty msg as we don't want to return a boatload of json to the user.
- msg = None
-
- return has_changed, has_failed, memset_api, msg
-
-
-def create_or_delete(args=None):
- '''
- We need to perform some initial sanity checking and also look
- up required info before handing it off to create or delete functions.
- Check mode is integrated into the create or delete functions.
- '''
- has_failed, has_changed = False, False
- msg, memset_api, stderr = None, None, None
- retvals, payload = dict(), dict()
-
- # get the zones and check if the relevant zone exists.
- api_method = 'dns.zone_list'
- _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- if _has_failed:
- # this is the first time the API is called; incorrect credentials will
- # manifest themselves at this point so we need to ensure the user is
- # informed of the reason.
- retvals['failed'] = _has_failed
- retvals['msg'] = msg
- retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
- return(retvals)
-
- zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
-
- if not zone_exists:
- has_failed = True
- if counter == 0:
- stderr = "DNS zone {0} does not exist." . format(args['zone'])
- elif counter > 1:
- stderr = "{0} matches multiple zones." . format(args['zone'])
- retvals['failed'] = has_failed
- retvals['msg'] = stderr
- retvals['stderr'] = stderr
- return(retvals)
-
- # get a list of all records (as we can't limit records by zone).
- api_method = 'dns.zone_record_list'
- _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
-
- # find any matching records
- records = [record for record in response.json() if record['zone_id'] == zone_id
- and record['record'] == args['record'] and record['type'] == args['type']]
-
- if args['state'] == 'present':
- has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
-
- if args['state'] == 'absent':
- has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
-
- retvals['changed'] = has_changed
- retvals['failed'] = has_failed
- for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
- if value is not None:
- retvals[key] = value
-
- return retvals
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
- api_key=dict(required=True, type='str', no_log=True),
- zone=dict(required=True, type='str'),
- type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
- address=dict(required=True, aliases=['ip', 'data'], type='str'),
- record=dict(required=False, default='', type='str'),
- ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
- priority=dict(required=False, default=0, type='int'),
- relative=dict(required=False, default=False, type='bool')
- ),
- supports_check_mode=True
- )
-
- # populate the dict with the user-provided vars.
- args = dict()
- for key, arg in module.params.items():
- args[key] = arg
- args['check_mode'] = module.check_mode
-
- # perform some Memset API-specific validation
- api_validation(args=args)
-
- retvals = create_or_delete(args)
-
- if retvals['failed']:
- module.fail_json(**retvals)
- else:
- module.exit_json(**retvals)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
deleted file mode 100644
index 1b44c50c..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/cloud_init_data_facts.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, René Moser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: cloud_init_data_facts
-short_description: Retrieve facts of cloud-init.
-description:
- - Gathers facts by reading the status.json and result.json of cloud-init.
-author: René Moser (@resmo)
-options:
- filter:
- description:
- - Filter which facts are returned.
- type: str
- choices: [ status, result ]
-notes:
- - See U(http://cloudinit.readthedocs.io/) for more information about cloud-init.
-'''
-
-EXAMPLES = '''
-- name: Gather all facts of cloud init
- community.general.cloud_init_data_facts:
- register: result
-
-- ansible.builtin.debug:
- var: result
-
-- name: Wait for cloud init to finish
- community.general.cloud_init_data_facts:
- filter: status
- register: res
- until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
- retries: 50
- delay: 5
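-
-# A hypothetical variation (the register name is illustrative): restrict the
-# gathered data to result.json only via the filter option.
-- name: Gather only the result facts of cloud-init
- community.general.cloud_init_data_facts:
- filter: result
- register: ci_result
-
-- ansible.builtin.debug:
- var: ci_result.cloud_init_data_facts.result.v1.datasource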
-'''
-
-RETURN = '''
----
-cloud_init_data_facts:
- description: Facts of result and status.
- returned: success
- type: dict
- sample: '{
- "status": {
- "v1": {
- "datasource": "DataSourceCloudStack",
- "errors": []
- }
- },
- "result": {
- "v1": {
- "datasource": "DataSourceCloudStack",
- "init": {
- "errors": [],
- "finished": 1522066377.0185432,
- "start": 1522066375.2648022
- },
- "init-local": {
- "errors": [],
- "finished": 1522066373.70919,
- "start": 1522066373.4726632
- },
- "modules-config": {
- "errors": [],
- "finished": 1522066380.9097016,
- "start": 1522066379.0011985
- },
- "modules-final": {
- "errors": [],
- "finished": 1522066383.56594,
- "start": 1522066382.3449218
- },
- "stage": null
- }
- }
- }'
-'''
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-
-
-CLOUD_INIT_PATH = "/var/lib/cloud/data"
-
-
-def gather_cloud_init_data_facts(module):
- res = {
- 'cloud_init_data_facts': dict()
- }
-
- # look up the requested filter once before iterating; this also avoids
- # shadowing the filter() builtin.
- filter_param = module.params.get('filter')
- for i in ['result', 'status']:
- if filter_param is None or filter_param == i:
- res['cloud_init_data_facts'][i] = dict()
- json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')
-
- if os.path.exists(json_file):
- with open(json_file, 'rb') as f:
- contents = to_text(f.read(), errors='surrogate_or_strict')
-
- if contents:
- res['cloud_init_data_facts'][i] = module.from_json(contents)
- return res
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- filter=dict(choices=['result', 'status']),
- ),
- supports_check_mode=True,
- )
-
- facts = gather_cloud_init_data_facts(module)
- result = dict(changed=False, ansible_facts=facts, **facts)
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
deleted file mode 100644
index 662e8348..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox.py
+++ /dev/null
@@ -1,780 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: proxmox
-short_description: management of instances in Proxmox VE cluster
-description:
- - allows you to create/delete/stop instances in Proxmox VE cluster
- - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
- - From community.general 4.0.0 on, there are no default values; see I(proxmox_default_behavior).
-options:
- password:
- description:
- - the instance root password
- type: str
- hostname:
- description:
- - the instance hostname
- - required only for C(state=present)
- - must be unique if vmid is not passed
- type: str
- ostemplate:
- description:
- - the template for VM creating
- - required only for C(state=present)
- type: str
- disk:
- description:
- - This option was previously described as "hard disk size in GB for instance" however several formats describing
- a lxc mount are permitted.
- - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically
- choose which storage to allocate from, however new versions enforce the C(:) syntax.
- - "Additional options are available by using some combination of the following key-value pairs as a
- comma-delimited list C([volume=] [,acl=<1|0>] [,mountoptions=] [,quota=<1|0>]
- [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=])."
- - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(3).
- type: str
- cores:
- description:
- - Specify number of cores per socket.
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
- type: int
- cpus:
- description:
- - numbers of allocated cpus for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
- type: int
- memory:
- description:
- - memory size in MB for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(512).
- type: int
- swap:
- description:
- - swap memory size in MB for instance
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(0).
- type: int
- netif:
- description:
- - specifies network interfaces for the container, as a hash/dictionary defining interfaces.
- type: dict
- features:
- description:
- - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options).
- - Some features require the use of a privileged container.
- type: list
- elements: str
- version_added: 2.0.0
- mounts:
- description:
- - specifies additional mounts (separate disks) for the container, as a hash/dictionary defining mount points.
- type: dict
- ip_address:
- description:
- - specifies the address the container will be assigned
- type: str
- onboot:
- description:
- - specifies whether a VM will be started during system bootup
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
- type: bool
- storage:
- description:
- - target storage
- type: str
- default: 'local'
- cpuunits:
- description:
- - CPU weight for a VM
- - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
- type: int
- nameserver:
- description:
- - sets DNS server IP address for a container
- type: str
- searchdomain:
- description:
- - sets DNS search domain for a container
- type: str
- timeout:
- description:
- - timeout for operations
- type: int
- default: 30
- force:
- description:
- - forcing operations
- - can be used only with states C(present), C(stopped), C(restarted)
- - with C(state=present) the force option allows overwriting an existing container
- - with states C(stopped) and C(restarted) it allows force-stopping the instance
- type: bool
- default: 'no'
- purge:
- description:
- - Remove container from all related configurations.
- - For example backup jobs, replication jobs, or HA.
- - Related ACLs and Firewall entries will always be removed.
- - Used with state C(absent).
- type: bool
- default: false
- version_added: 2.3.0
- state:
- description:
- - Indicate desired state of the instance
- type: str
- choices: ['present', 'started', 'absent', 'stopped', 'restarted']
- default: present
- pubkey:
- description:
- - Public key to add to /root/.ssh/authorized_keys. This was added on Proxmox 4.2; it is ignored for earlier versions.
- type: str
- unprivileged:
- description:
- - Indicate if the container should be unprivileged
- type: bool
- default: 'no'
- description:
- description:
- - Specify the description for the container. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file.
- type: str
- version_added: '0.2.0'
- hookscript:
- description:
- - Script that will be executed during various steps in the container's lifetime.
- type: str
- version_added: '0.2.0'
- proxmox_default_behavior:
- description:
- - As of community.general 4.0.0, various options no longer have default values.
- These default values caused problems when users expected different behavior from Proxmox
- by default or filled options which caused problems when set.
- - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
- are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
- which makes sure these options have no defaults.
- - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
- type: str
- default: no_defaults
- choices:
- - compatibility
- - no_defaults
- version_added: "1.3.0"
- clone:
- description:
- - ID of the container to be cloned.
- - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
- - The type of clone created is defined by the I(clone_type) parameter.
- - This operation is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
- type: int
- version_added: 4.3.0
- clone_type:
- description:
- - Type of the clone created.
- - C(full) creates a full clone, and I(storage) must be specified.
- - C(linked) creates a linked clone, and the cloned container must be a template container.
- - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
- I(storage) may be specified, if not it will fall back to the default.
- type: str
- choices: ['full', 'linked', 'opportunistic']
- default: opportunistic
- version_added: 4.3.0
-author: Sergei Antipov (@UnderGreen)
-extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.proxmox.selection
-'''
-
-EXAMPLES = r'''
-- name: Create new container with minimal options
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with hookscript and description
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- hookscript: 'local:snippets/vm_hook.sh'
- description: created with ansible
-
-- name: Create new container automatically selecting the next available vmid.
- community.general.proxmox:
- node: 'uk-mc02'
- api_user: 'root@pam'
- api_password: '1q2w3e'
- api_host: 'node1'
- password: '123456'
- hostname: 'example.org'
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options with force (it will overwrite an existing container)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- force: yes
-
-- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (export it beforehand)
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
-
-- name: Create new container with minimal options defining network interface with dhcp
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
-
-- name: Create new container with minimal options defining network interface with static ip
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
-
-- name: Create new container with minimal options defining a mount with 8GB
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
-
-- name: Create new container with minimal options defining a cpu core limit
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- cores: 2
-
-- name: Create a new container with nesting enabled, allowing the use of CIFS/NFS inside the container
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- features:
- - nesting=1
- - mount=cifs,nfs
-
-- name: >
- Create a linked clone of the template container with id 100. The newly created container will be a
- linked clone, because no storage parameter is defined
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
-
-- name: Create a full clone of the container with id 100
- community.general.proxmox:
- vmid: 201
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- clone: 100
- hostname: clone.example.org
- storage: local
-
-- name: Start container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
-
-- name: >
- Start container with mount. You should set a 90-second timeout because servers
- with additional disks take longer to boot
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: started
- timeout: 90
-
-- name: Stop container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: stopped
-
-- name: Stop container with force
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- force: yes
- state: stopped
-
-- name: Restart container (a stopped or mounted container cannot be restarted)
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: restarted
-
-- name: Remove container
- community.general.proxmox:
- vmid: 100
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- state: absent
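-
-# A hypothetical example (credentials and names are illustrative): restore
-# the pre-community.general-4.0.0 option defaults via proxmox_default_behavior.
-- name: Create new container with the old compatibility defaults
- community.general.proxmox:
- vmid: 100
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- password: 123456
- hostname: example.org
- ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- proxmox_default_behavior: compatibility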
-'''
-
-import time
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
-
-VZ_TYPE = None
-
-
-class ProxmoxLxcAnsible(ProxmoxAnsible):
- def content_check(self, node, ostemplate, template_store):
- return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
-
- def is_template_container(self, node, vmid):
- """Check if the specified container is a template."""
- proxmox_node = self.proxmox_api.nodes(node)
- config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
- return config['template']
-
- def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
- proxmox_node = self.proxmox_api.nodes(node)
-
- # Remove all empty kwarg entries
- kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
- if VZ_TYPE == 'lxc':
- kwargs['cpulimit'] = cpus
- kwargs['rootfs'] = disk
- if 'netif' in kwargs:
- kwargs.update(kwargs['netif'])
- del kwargs['netif']
- if 'mounts' in kwargs:
- kwargs.update(kwargs['mounts'])
- del kwargs['mounts']
- if 'pubkey' in kwargs:
- if self.version() >= LooseVersion('4.2'):
- kwargs['ssh-public-keys'] = kwargs['pubkey']
- del kwargs['pubkey']
- else:
- kwargs['cpus'] = cpus
- kwargs['disk'] = disk
-
- if clone is not None:
- if VZ_TYPE != 'lxc':
- self.module.fail_json(changed=False, msg="Clone operator is only supported for LXC enabled proxmox clusters.")
-
- clone_is_template = self.is_template_container(node, clone)
-
- # By default, create a full copy only when the cloned container is not a template.
- create_full_copy = not clone_is_template
-
- # Only accept parameters that are compatible with the clone endpoint.
- valid_clone_parameters = ['hostname', 'pool', 'description']
- if self.module.params['storage'] is not None and clone_is_template:
- # Cloning a template, so create a full copy instead of a linked copy
- create_full_copy = True
- elif self.module.params['storage'] is None and not clone_is_template:
- # Not cloning a template, but also no defined storage. This isn't possible.
- self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
-
- if self.module.params['clone_type'] == 'linked':
- if not clone_is_template:
- self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
-                # Nothing more to do: create_full_copy already defaults to False because the source is a template
- elif self.module.params['clone_type'] == 'opportunistic':
- if not clone_is_template:
- # Cloned container is not a template, so we need our 'storage' parameter
- valid_clone_parameters.append('storage')
- elif self.module.params['clone_type'] == 'full':
- create_full_copy = True
- valid_clone_parameters.append('storage')
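-            # Illustrative summary of the clone_type handling above:
-            #   linked        -> source must be a template; results in full='0', no storage needed
-            #   opportunistic -> linked clone for templates, full clone (storage required) otherwise
-            #   full          -> always a full copy; 'storage' becomes a valid clone parameter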
-
- clone_parameters = {}
-
- if create_full_copy:
- clone_parameters['full'] = '1'
- else:
- clone_parameters['full'] = '0'
- for param in valid_clone_parameters:
- if self.module.params[param] is not None:
- clone_parameters[param] = self.module.params[param]
-
- taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
- else:
- taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
-
- while timeout:
- if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped' and
- proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
-                                      proxmox_node.tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def start_instance(self, vm, vmid, timeout):
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
- while timeout:
- if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
- self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for VM start. Last line in task before timeout: %s' %
-                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def stop_instance(self, vm, vmid, timeout, force):
- if force:
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
- else:
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
- while timeout:
- if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
- self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for VM stop. Last line in task before timeout: %s' %
-                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def umount_instance(self, vm, vmid, timeout):
- taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
- while timeout:
- if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
- self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
-                self.module.fail_json(msg='Reached timeout while waiting for VM unmount. Last line in task before timeout: %s' %
-                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
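-
-    # The four polling loops in this class share the same shape. A sketch of a
-    # shared helper (hypothetical name, analogous to wait_for_task in the
-    # proxmox_kvm module below) could factor them out:
-    #
-    #     def wait_for(self, node, taskid, timeout, action):
-    #         while timeout:
-    #             task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
-    #             if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
-    #                 return True
-    #             timeout -= 1
-    #             time.sleep(1)
-    #         self.module.fail_json(msg='Reached timeout while waiting for %s' % action)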
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- proxmox_args = dict(
- vmid=dict(type='int', required=False),
- node=dict(),
- pool=dict(),
- password=dict(no_log=True),
- hostname=dict(),
- ostemplate=dict(),
- disk=dict(type='str'),
- cores=dict(type='int'),
- cpus=dict(type='int'),
- memory=dict(type='int'),
- swap=dict(type='int'),
- netif=dict(type='dict'),
- mounts=dict(type='dict'),
- ip_address=dict(),
- onboot=dict(type='bool'),
- features=dict(type='list', elements='str'),
- storage=dict(default='local'),
- cpuunits=dict(type='int'),
- nameserver=dict(),
- searchdomain=dict(),
- timeout=dict(type='int', default=30),
- force=dict(type='bool', default=False),
- purge=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
- pubkey=dict(type='str', default=None),
- unprivileged=dict(type='bool', default=False),
- description=dict(type='str'),
- hookscript=dict(type='str'),
- proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
- clone=dict(type='int'),
- clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
- )
- module_args.update(proxmox_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_if=[
- ('state', 'present', ['node', 'hostname']),
- ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
- # either clone a container or create a new one from a template file.
- ],
- required_together=[
- ('api_token_id', 'api_token_secret')
- ],
- required_one_of=[('api_password', 'api_token_id')],
- mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
- )
-
- proxmox = ProxmoxLxcAnsible(module)
-
- global VZ_TYPE
- VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc'
-
- state = module.params['state']
- vmid = module.params['vmid']
- node = module.params['node']
- disk = module.params['disk']
- cpus = module.params['cpus']
- memory = module.params['memory']
- swap = module.params['swap']
- storage = module.params['storage']
- hostname = module.params['hostname']
- if module.params['ostemplate'] is not None:
- template_store = module.params['ostemplate'].split(":")[0]
- timeout = module.params['timeout']
- clone = module.params['clone']
-
- if module.params['proxmox_default_behavior'] == 'compatibility':
- old_default_values = dict(
- disk="3",
- cores=1,
- cpus=1,
- memory=512,
- swap=0,
- onboot=False,
- cpuunits=1000,
- )
- for param, value in old_default_values.items():
- if module.params[param] is None:
- module.params[param] = value
-
-    # If vmid is not set, get the next VM id from the Proxmox API
-    # If hostname is set, get the VM id from the Proxmox API
- if not vmid and state == 'present':
- vmid = proxmox.get_nextvmid()
- elif not vmid and hostname:
- vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True)
- elif not vmid:
- module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
- # Create a new container
- if state == 'present' and clone is None:
- try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
- module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
- # If no vmid was passed, there cannot be another VM named 'hostname'
- if (not module.params['vmid'] and
- proxmox.get_vmid(hostname, ignore_missing=True, choose_first_if_multiple=True) and
- not module.params['force']):
- vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True)
- module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
- elif not proxmox.get_node(node):
- module.fail_json(msg="node '%s' not exists in cluster" % node)
- elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
- module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
- % (module.params['ostemplate'], node, template_store))
- except Exception as e:
- module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
-
- try:
- proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
- cores=module.params['cores'],
- pool=module.params['pool'],
- password=module.params['password'],
- hostname=module.params['hostname'],
- ostemplate=module.params['ostemplate'],
- netif=module.params['netif'],
- mounts=module.params['mounts'],
- ip_address=module.params['ip_address'],
- onboot=ansible_to_proxmox_bool(module.params['onboot']),
- cpuunits=module.params['cpuunits'],
- nameserver=module.params['nameserver'],
- searchdomain=module.params['searchdomain'],
- force=ansible_to_proxmox_bool(module.params['force']),
- pubkey=module.params['pubkey'],
- features=",".join(module.params['features']) if module.params['features'] is not None else None,
- unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
- description=module.params['description'],
- hookscript=module.params['hookscript'])
-
- module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
- except Exception as e:
- module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
-
- # Clone a container
- elif state == 'present' and clone is not None:
- try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
- module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
- # If no vmid was passed, there cannot be another VM named 'hostname'
- if (not module.params['vmid'] and
- proxmox.get_vmid(hostname, ignore_missing=True, choose_first_if_multiple=True) and
- not module.params['force']):
- vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True)
- module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
- if not proxmox.get_vm(clone, ignore_missing=True):
- module.exit_json(changed=False, msg="Container to be cloned does not exist")
- except Exception as e:
- module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
-
- try:
- proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
-
- module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
- except Exception as e:
- module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
-
- elif state == 'started':
- try:
- vm = proxmox.get_vm(vmid)
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
- module.exit_json(changed=False, msg="VM %s is already running" % vmid)
-
- if proxmox.start_instance(vm, vmid, timeout):
- module.exit_json(changed=True, msg="VM %s started" % vmid)
- except Exception as e:
- module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'stopped':
- try:
- vm = proxmox.get_vm(vmid)
-
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
- if module.params['force']:
- if proxmox.umount_instance(vm, vmid, timeout):
- module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
- else:
- module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
- "You can use force option to umount it.") % vmid)
-
- if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
- module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
-
- if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
- module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
- except Exception as e:
- module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'restarted':
- try:
- vm = proxmox.get_vm(vmid)
-
- vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
- if vm_status in ['stopped', 'mounted']:
- module.exit_json(changed=False, msg="VM %s is not running" % vmid)
-
- if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
- proxmox.start_instance(vm, vmid, timeout)):
- module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
- except Exception as e:
- module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'absent':
- try:
- vm = proxmox.get_vm(vmid, ignore_missing=True)
- if not vm:
- module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
-
- vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
- if vm_status == 'running':
- module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
-
- if vm_status == 'mounted':
- module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
-
- delete_params = {}
-
- if module.params['purge']:
- delete_params['purge'] = 1
-
- taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
-
- while timeout:
- task_status = proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()
- if (task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK'):
- module.exit_json(changed=True, msg="VM %s removed" % vmid)
- timeout -= 1
- if timeout == 0:
-                    module.fail_json(msg='Reached timeout while waiting for VM removal. Last line in task before timeout: %s'
-                                     % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- except Exception as e:
- module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
deleted file mode 100644
index 675b04a4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_domain_info.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_domain_info
-short_description: Retrieve information about one or more Proxmox VE domains
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE domains.
-options:
- domain:
- description:
- - Restrict results to a specific authentication realm.
- aliases: ['realm', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-
-EXAMPLES = '''
-- name: List existing domains
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_domains
-
-- name: Retrieve information about the pve domain
- community.general.proxmox_domain_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- domain: pve
- register: proxmox_domain_pve
-'''
-
-
-RETURN = '''
-proxmox_domains:
- description: List of authentication domains.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the realm.
- returned: on success
- type: str
- realm:
- description: Realm name.
- returned: on success
- type: str
- type:
- description: Realm type.
- returned: on success
- type: str
- digest:
- description: Realm hash.
- returned: on success, can be absent
- type: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
- def get_domain(self, realm):
- try:
- domain = self.proxmox_api.access.domains.get(realm)
- except Exception:
- self.module.fail_json(msg="Domain '%s' does not exist" % realm)
- domain['realm'] = realm
- return domain
-
- def get_domains(self):
- domains = self.proxmox_api.access.domains.get()
- return domains
-
-
-def proxmox_domain_info_argument_spec():
- return dict(
- domain=dict(type='str', aliases=['realm', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- domain_info_args = proxmox_domain_info_argument_spec()
- module_args.update(domain_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxDomainInfoAnsible(module)
- domain = module.params['domain']
-
- if domain:
- domains = [proxmox.get_domain(realm=domain)]
- else:
- domains = proxmox.get_domains()
- result['proxmox_domains'] = domains
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
deleted file mode 100644
index 58b56e85..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_group_info.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_group_info
-short_description: Retrieve information about one or more Proxmox VE groups
-version_added: 1.3.0
-description:
-  - Retrieve information about one or more Proxmox VE groups.
-options:
- group:
- description:
- - Restrict results to a specific group.
- aliases: ['groupid', 'name']
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-
-EXAMPLES = '''
-- name: List existing groups
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_groups
-
-- name: Retrieve information about the admin group
- community.general.proxmox_group_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- group: admin
- register: proxmox_group_admin
-'''
-
-
-RETURN = '''
-proxmox_groups:
- description: List of groups.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the group.
- returned: on success, can be absent
- type: str
- groupid:
- description: Group name.
- returned: on success
- type: str
- users:
- description: List of users in the group.
- returned: on success
- type: list
- elements: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
- def get_group(self, groupid):
- try:
- group = self.proxmox_api.access.groups.get(groupid)
- except Exception:
- self.module.fail_json(msg="Group '%s' does not exist" % groupid)
- group['groupid'] = groupid
- return ProxmoxGroup(group)
-
- def get_groups(self):
- groups = self.proxmox_api.access.groups.get()
- return [ProxmoxGroup(group) for group in groups]
-
-
-class ProxmoxGroup:
- def __init__(self, group):
- self.group = dict()
-        # Data representation differs depending on which API call returned the group
- for k, v in group.items():
- if k == 'users' and isinstance(v, str):
- self.group['users'] = v.split(',')
- elif k == 'members':
- self.group['users'] = group['members']
- else:
- self.group[k] = v
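-
-        # Illustrative example (API payload shapes assumed from the branches above):
-        # a group listing entry like {'groupid': 'admin', 'users': 'alice,bob'} and a
-        # single-group GET like {'members': ['alice', 'bob']} both end up with
-        # self.group['users'] == ['alice', 'bob'].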
-
-
-def proxmox_group_info_argument_spec():
- return dict(
- group=dict(type='str', aliases=['groupid', 'name']),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- group_info_args = proxmox_group_info_argument_spec()
- module_args.update(group_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxGroupInfoAnsible(module)
- group = module.params['group']
-
- if group:
- groups = [proxmox.get_group(groupid=group)]
- else:
- groups = proxmox.get_groups()
- result['proxmox_groups'] = [group.group for group in groups]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
deleted file mode 100644
index 6bfb9e2e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_kvm.py
+++ /dev/null
@@ -1,1408 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Abdoul Bah (@helldorado)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_kvm
-short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
-description:
- - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
-  - Since community.general 4.0.0, there are no default values anymore; see I(proxmox_default_behavior).
-author: "Abdoul Bah (@helldorado) "
-options:
- acpi:
- description:
- - Specify if ACPI should be enabled/disabled.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(yes).
- type: bool
- agent:
- description:
- - Specify if the QEMU Guest Agent should be enabled/disabled.
- type: bool
- args:
- description:
- - Pass arbitrary arguments to kvm.
- - This option is for experts only!
-      - If I(proxmox_default_behavior) is set to C(compatibility), this option has a default of
-        C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
- type: str
- autostart:
- description:
- - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
- type: bool
- balloon:
- description:
- - Specify the amount of RAM for the VM in MB.
- - Using zero disables the balloon driver.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(0).
- type: int
- bios:
- description:
- - Specify the BIOS implementation.
- type: str
- choices: ['seabios', 'ovmf']
- boot:
- description:
- - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
-      - You can combine these to set the order.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(cnd).
- type: str
- bootdisk:
- description:
- - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
- type: str
- cicustom:
- description:
- - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
- type: str
- version_added: 1.3.0
- cipassword:
- description:
- - 'cloud-init: password of default user to create.'
- type: str
- version_added: 1.3.0
- citype:
- description:
- - 'cloud-init: Specifies the cloud-init configuration format.'
- - The default depends on the configured operating system type (C(ostype)).
- - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
- type: str
- choices: ['nocloud', 'configdrive2']
- version_added: 1.3.0
- ciuser:
- description:
- - 'cloud-init: username of default user to create.'
- type: str
- version_added: 1.3.0
- clone:
- description:
-      - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is still required to initiate the clone.
- type: str
- cores:
- description:
- - Specify number of cores per socket.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
- type: int
- cpu:
- description:
- - Specify emulated CPU type.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(kvm64).
- type: str
- cpulimit:
- description:
- - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
-      - If the computer has 2 CPUs, it has a total of '2' CPU time.
- type: int
- cpuunits:
- description:
- - Specify CPU weight for a VM.
-      - You can disable fair-scheduler configuration by setting this to 0.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
- type: int
- delete:
- description:
- - Specify a list of settings you want to delete.
- type: str
- description:
- description:
- - Specify the description for the VM. Only used on the configuration web interface.
- - This is saved as comment inside the configuration file.
- type: str
- digest:
- description:
-      - Prevent changes if the current configuration file has a different SHA1 digest.
- - This can be used to prevent concurrent modifications.
- type: str
- efidisk0:
- description:
- - Specify a hash/dictionary of EFI disk options.
- - Requires I(bios=ovmf) to be set to be able to use it.
- type: dict
- suboptions:
- storage:
- description:
- - C(storage) is the storage identifier where to create the disk.
- type: str
- format:
- description:
- - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
- section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest
- version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: str
- efitype:
- description:
- - C(efitype) indicates the size of the EFI disk.
- - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
-          - C(4m) will allow for a 4MB EFI disk, which will additionally allow storing EFI keys in order to enable
-            Secure Boot.
- type: str
- choices:
- - 2m
- - 4m
- pre_enrolled_keys:
- description:
-          - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled C(1) in the VM firmware
-            upon creation or not C(0).
- - If set to C(1), Secure Boot will also be enabled by default when the VM is created.
- type: bool
- version_added: 4.5.0
- force:
- description:
-      - Allow force-stopping the VM.
- - Can be used with states C(stopped), C(restarted) and C(absent).
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
- type: bool
- format:
- description:
- - Target drive's backing file's data format.
- - Used only with clone
- - Use I(format=unspecified) and I(full=false) for a linked clone.
- - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see
- U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out format
- supported by the provided storage backend.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(qcow2).
- If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
- type: str
- choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
- freeze:
- description:
- - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
- type: bool
- full:
- description:
-      - Create a full copy of all disks. This is always done when you clone a normal VM.
- - For VM templates, we try to create a linked clone by default.
- - Used only with clone
- type: bool
- default: 'yes'
- hostpci:
- description:
-      - Specify a hash/dictionary mapping host PCI devices into the guest. C(hostpci='{"key":"value", "key":"value"}').
- - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
- - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
- - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
- - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
- - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
- - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
- type: dict
- hotplug:
- description:
- - Selectively enable hotplug features.
- - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
- - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
- type: str
- hugepages:
- description:
- - Enable/disable hugepages memory.
- type: str
- choices: ['any', '2', '1024']
- ide:
- description:
- - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
- - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
- Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
- the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- ipconfig:
- description:
- - 'cloud-init: Set the IP configuration.'
- - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
- - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
- - Values allowed are - C("[gw=] [,gw6=] [,ip=] [,ip6=]").
- - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
- - IP addresses use CIDR notation, gateways are optional but they should be in the same subnet of specified IP address.
- - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
- - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
- - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
- type: dict
- version_added: 1.3.0
- keyboard:
- description:
- - Sets the keyboard layout for VNC server.
- type: str
- kvm:
- description:
- - Enable/disable KVM hardware virtualization.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(yes).
- type: bool
- localtime:
- description:
- - Sets the real time clock to local time.
- - This is enabled by default if ostype indicates a Microsoft OS.
- type: bool
- lock:
- description:
- - Lock/unlock the VM.
- type: str
- choices: ['migrate', 'backup', 'snapshot', 'rollback']
- machine:
- description:
- - Specifies the Qemu machine type.
- - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
- type: str
- memory:
- description:
- - Memory size in MB for instance.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(512).
- type: int
- migrate_downtime:
- description:
- - Sets maximum tolerated downtime (in seconds) for migrations.
- type: int
- migrate_speed:
- description:
- - Sets maximum speed (in MB/s) for migrations.
- - A value of 0 is no limit.
- type: int
- name:
- description:
- - Specifies the VM name. Only used on the configuration web interface.
- - Required only for C(state=present).
- type: str
- nameservers:
- description:
- - 'cloud-init: DNS server IP address(es).'
- - If unset, PVE host settings are used.
- type: list
- elements: str
- version_added: 1.3.0
- net:
- description:
- - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
- - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
- - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
-      - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
- - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
- - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
- - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
- type: dict
- newid:
- description:
- - VMID for the clone. Used only with clone.
- - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
- type: int
- numa:
- description:
- - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
- - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
- - Values allowed are - C("cpu="",hostnodes="",memory="number",policy="(bind|interleave|preferred)"").
- - C(cpus) CPUs accessing this NUMA node.
- - C(hostnodes) Host NUMA nodes to use.
- - C(memory) Amount of memory this NUMA node provides.
- - C(policy) NUMA allocation policy.
- type: dict
- numa_enabled:
- description:
- - Enables NUMA.
- type: bool
- onboot:
- description:
- - Specifies whether a VM will be started during system bootup.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(yes).
- type: bool
- ostype:
- description:
- - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
-      - C(l26) is the Linux 2.6/3.X kernel.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(l26).
- type: str
- choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']
- parallel:
- description:
-      - A hash/dictionary mapping host parallel devices. C(parallel='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
- - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
- type: dict
- protection:
- description:
- - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
- type: bool
- reboot:
- description:
-      - Allow reboot. If set to C(yes), the VM exits on reboot.
- type: bool
- revert:
- description:
- - Revert a pending change.
- type: str
- sata:
- description:
- - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
- - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
- Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
- the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- scsi:
- description:
- - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
- Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
- the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- scsihw:
- description:
- - Specifies the SCSI controller model.
- type: str
- choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
- searchdomains:
- description:
- - 'cloud-init: Sets DNS search domain(s).'
- - If unset, PVE host settings are used.
- type: list
- elements: str
- version_added: 1.3.0
- serial:
- description:
- - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
- - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
- - Values allowed are - C((/dev/.+|socket)).
- - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
- type: dict
- shares:
- description:
-      - Sets the amount of memory shares for auto-ballooning (0 - 50000).
- - The larger the number is, the more memory this VM gets.
- - The number is relative to weights of all other running VMs.
- - Using 0 disables auto-ballooning, this means no limit.
- type: int
- skiplock:
- description:
- - Ignore locks
- - Only root is allowed to use this option.
- type: bool
- smbios:
- description:
- - Specifies SMBIOS type 1 fields.
- type: str
- snapname:
- description:
- - The name of the snapshot. Used only with clone.
- type: str
- sockets:
- description:
- - Sets the number of CPU sockets. (1 - N).
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
- type: int
- sshkeys:
- description:
- - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
- type: str
- version_added: 1.3.0
- startdate:
- description:
- - Sets the initial date of the real time clock.
-      - Valid formats for the date are C('now'), C('2016-09-25T16:01:21'), or C('2016-09-25').
- type: str
- startup:
- description:
- - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
- - Order is a non-negative number defining the general startup order.
-      - Shutdown is done in reverse order.
- type: str
- state:
- description:
- - Indicates desired state of the instance.
- - If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
- type: str
-    choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
- default: present
- storage:
- description:
- - Target storage for full clone.
- type: str
- tablet:
- description:
- - Enables/disables the USB tablet device.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
- type: bool
- tags:
- description:
- - List of tags to apply to the VM instance.
- - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
- - Tags are only available in Proxmox 6+.
- type: list
- elements: str
- version_added: 2.3.0
- target:
- description:
- - Target node. Only allowed if the original VM is on shared storage.
- - Used only with clone
- type: str
- tdf:
- description:
- - Enables/disables time drift fix.
- type: bool
- template:
- description:
- - Enables/disables the template.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(no).
- type: bool
- timeout:
- description:
- - Timeout for operations.
- type: int
- default: 30
- update:
- description:
-      - If C(yes), the VM will be updated with new values.
-      - Because of how the API operates and for security reasons, updating the following parameters is disabled:
-      - C(net, virtio, ide, sata, scsi). For example, updating C(net) would update the MAC address and C(virtio) would always create a new disk...
- - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
- type: bool
- default: 'no'
- vcpus:
- description:
- - Sets number of hotplugged vcpus.
- type: int
- vga:
- description:
- - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
-      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(std).
- type: str
- choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
- virtio:
- description:
- - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
-      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
- - Values allowed are - C("storage:size,format=value").
- - C(storage) is the storage identifier where to create the disk.
- - C(size) is the size of the disk in GB.
- - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
- Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html)
- for the latest version, tables 3 to 14) to find out format supported by the provided storage backend.
- type: dict
- watchdog:
- description:
- - Creates a virtual hardware watchdog device.
- type: str
- proxmox_default_behavior:
- description:
- - As of community.general 4.0.0, various options no longer have default values.
- These default values caused problems when users expected different behavior from Proxmox
- by default or filled options which caused problems when set.
- - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
- are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
- which makes sure these options have no defaults.
- - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
- I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
- I(tablet), I(template), I(vga), options.
- type: str
- default: no_defaults
- choices:
- - compatibility
- - no_defaults
- version_added: "1.3.0"
-extends_documentation_fragment:
- - community.general.proxmox.documentation
- - community.general.proxmox.selection
-'''
-
-EXAMPLES = '''
-- name: Create new VM with minimal options
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
-
-- name: Create new VM with minimal options and given vmid
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- vmid: 100
-
-- name: Create new VM with two network interface options
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- net:
- net0: 'virtio,bridge=vmbr1,rate=200'
- net1: 'e1000,bridge=vmbr2'
-
-- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- net:
- net0: 'virtio,bridge=vmbr1,rate=200'
- virtio:
- virtio0: 'VMs_LVM:10'
- virtio1: 'VMs:2,format=qcow2'
- virtio2: 'VMs:5,format=raw'
- cores: 4
- vcpus: 2
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot disabled by default
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- sata:
- sata0: 'VMs_LVM:10,format=raw'
- bios: ovmf
- efidisk0:
- storage: VMs_LVM_thin
- format: raw
- efitype: 4m
- pre_enrolled_keys: False
-
-- name: Create VM with 1 10GB SATA disk and an EFI disk, with Secure Boot enabled by default
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- sata:
- sata0: 'VMs_LVM:10,format=raw'
- bios: ovmf
- efidisk0:
- storage: VMs_LVM
- format: raw
- efitype: 4m
- pre_enrolled_keys: 1
-
-- name: >
- Clone VM with only source VM name.
- The VM source is spynal.
- The target VM name is zavala
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: spynal
- name: zavala
- node: sabrewulf
- storage: VMs
- format: qcow2
- timeout: 500
-
-- name: >
- Create linked clone VM with only source VM name.
- The VM source is spynal.
- The target VM name is zavala
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: spynal
- name: zavala
- node: sabrewulf
- storage: VMs
- full: no
- format: unspecified
- timeout: 500
-
-- name: Clone VM with source vmid and target newid and raw format
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- clone: arbitrary_name
- vmid: 108
- newid: 152
- name: zavala
- node: sabrewulf
- storage: LVM_STO
- format: raw
- timeout: 300
-
-- name: Create new VM and lock it for snapshot
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- lock: snapshot
-
-- name: Create new VM and set protection to disable the remove VM and remove disk operations
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- protection: yes
-
-- name: Create new VM using cloud-init with a username and password
- community.general.proxmox_kvm:
- node: sabrewulf
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- ide:
- ide2: 'local:cloudinit,format=qcow2'
- ciuser: mylinuxuser
- cipassword: supersecret
- searchdomains: 'mydomain.internal'
- nameservers: 1.1.1.1
- net:
- net0: 'virtio,bridge=vmbr1,tag=77'
- ipconfig:
- ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
-
-- name: Create new VM using Cloud-Init with an ssh key
- community.general.proxmox_kvm:
- node: sabrewulf
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- ide:
- ide2: 'local:cloudinit,format=qcow2'
- sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
- searchdomains: 'mydomain.internal'
- nameservers:
- - '1.1.1.1'
- - '8.8.8.8'
- net:
- net0: 'virtio,bridge=vmbr1,tag=77'
- ipconfig:
- ipconfig0: 'ip=192.168.1.1/24'
-
-- name: Start VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: started
-
-- name: Stop VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: stopped
-
-- name: Stop VM with force
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: stopped
- force: yes
-
-- name: Restart VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: restarted
-
-- name: Remove VM
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: absent
-
-- name: Get VM current state
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- state: current
-
-- name: Update VM configuration
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- cores: 8
- memory: 16384
- update: yes
-
-- name: Delete QEMU parameters
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- delete: 'args,template,cpulimit'
-
-- name: Revert a pending change
- community.general.proxmox_kvm:
- api_user: root@pam
- api_password: secret
- api_host: helldorado
- name: spynal
- node: sabrewulf
- revert: 'template,cpulimit'
-'''
-
-RETURN = '''
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 115
-status:
- description: The current virtual machine status.
- returned: success, not clone, not absent, not update
- type: str
- sample: running
-msg:
- description: A short message
- returned: always
- type: str
- sample: "VM kropta with vmid = 110 is running"
-'''
-
-import re
-import time
-import traceback
-from ansible.module_utils.six.moves.urllib.parse import quote
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-
-def parse_mac(netstr):
- return re.search('=(.*?),', netstr).group(1)
-
-
-def parse_dev(devstr):
- return re.search('(.*?)(,|$)', devstr).group(1)
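-
-# Illustrative examples for the two helpers above (input values assumed, following
-# the net[n]/disk[n] value formats documented in this module):
-#   parse_mac('virtio=32:61:2C:6B:6B:53,bridge=vmbr0') -> '32:61:2C:6B:6B:53'
-#   parse_dev('local-lvm:vm-100-disk-0,size=32G')      -> 'local-lvm:vm-100-disk-0'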
-
-
-class ProxmoxKvmAnsible(ProxmoxAnsible):
- def get_vminfo(self, node, vmid, **kwargs):
- global results
- results = {}
- mac = {}
- devices = {}
- try:
- vm = self.proxmox_api.nodes(node).qemu(vmid).config.get()
- except Exception as e:
- self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
-        # Sanitize kwargs. Remove undefined args and ensure True and False are converted to int.
- kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
- # Convert all dict in kwargs to elements.
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
- for k in list(kwargs.keys()):
- if isinstance(kwargs[k], dict):
- kwargs.update(kwargs[k])
- del kwargs[k]
-
- # Split information by type
- re_net = re.compile(r'net[0-9]')
- re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]')
- for k in kwargs.keys():
- if re_net.match(k):
- mac[k] = parse_mac(vm[k])
- elif re_dev.match(k):
- devices[k] = parse_dev(vm[k])
-
- results['mac'] = mac
- results['devices'] = devices
- results['vmid'] = int(vmid)
-
- def settings(self, vmid, node, **kwargs):
- proxmox_node = self.proxmox_api.nodes(node)
-
-        # Sanitize kwargs. Remove undefined args and ensure True and False are converted to int.
- kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
-
- return proxmox_node.qemu(vmid).config.set(**kwargs) is None
-
- def wait_for_task(self, node, taskid):
- timeout = self.module.params['timeout']
-
- while timeout:
- task = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- if task['status'] == 'stopped' and task['exitstatus'] == 'OK':
-                # Wait an extra second as the API can be ahead of the hypervisor
- time.sleep(1)
- return True
- timeout = timeout - 1
- if timeout == 0:
- break
- time.sleep(1)
- return False
-
- def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
- # Available only in PVE 4
- only_v4 = ['force', 'protection', 'skiplock']
- only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
-
-        # Valid clone parameters
- valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
- clone_params = {}
- # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
- vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
-
- proxmox_node = self.proxmox_api.nodes(node)
-
-        # Sanitize kwargs. Remove undefined args and ensure True and False are converted to int.
- kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
- kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
-
- version = self.version()
- pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
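-        # For example (version strings assumed): a reported version of '6.4' yields
-        # pve_major_version == 6, while anything below '4.0' is treated as major version 3.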
-
- # The features work only on PVE 4+
- if pve_major_version < 4:
- for p in only_v4:
- if p in kwargs:
- del kwargs[p]
-
-        # These features work only on PVE 6+
- if pve_major_version < 6:
- for p in only_v6:
- if p in kwargs:
- del kwargs[p]
-
-        # The 'sshkeys' param expects a URL-encoded string
- if 'sshkeys' in kwargs:
- urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
- kwargs['sshkeys'] = str(urlencoded_ssh_keys)
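-            # Illustrative example (key value assumed): quote('ssh-rsa AAAA... user@host', safe='')
-            # returns 'ssh-rsa%20AAAA...%20user%40host' - with safe='' even spaces and '@'
-            # are percent-encoded.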
-
- # If update, don't update disk (virtio, efidisk0, ide, sata, scsi) and network interface
-        # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
- if update:
- if 'virtio' in kwargs:
- del kwargs['virtio']
- if 'sata' in kwargs:
- del kwargs['sata']
- if 'scsi' in kwargs:
- del kwargs['scsi']
- if 'ide' in kwargs:
- del kwargs['ide']
- if 'efidisk0' in kwargs:
- del kwargs['efidisk0']
- if 'net' in kwargs:
- del kwargs['net']
- if 'force' in kwargs:
- del kwargs['force']
- if 'pool' in kwargs:
- del kwargs['pool']
-
- # Check that the bios option is set to ovmf if the efidisk0 option is present
- if 'efidisk0' in kwargs:
- if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']):
- self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf. ')
-
- # Flatten efidisk0 option to a string so that it's a string which is what Proxmoxer and the API expect
- if 'efidisk0' in kwargs:
- efidisk0_str = ''
-            # Regexp to catch underscores in key names, to replace them with hyphens later
- hyphen_re = re.compile(r'_')
- # If present, the storage definition should be the first argument
- if 'storage' in kwargs['efidisk0']:
- efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,'
- kwargs['efidisk0'].pop('storage')
- # Join other elements from the dict as key=value using commas as separator, replacing any underscore in key
- # by hyphens (needed for pre_enrolled_keys to pre-enrolled-keys)
- efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items()
- if 'storage' != k])
- kwargs['efidisk0'] = efidisk0_str
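-            # Illustrative example (values assumed): {'storage': 'VMs_LVM', 'efitype': '4m'}
-            # flattens to 'VMs_LVM:1,efitype=4m'; a pre_enrolled_keys entry would be rendered
-            # with the hyphenated key 'pre-enrolled-keys'.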
-
- # Convert all dict in kwargs to elements.
- # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
- for k in list(kwargs.keys()):
- if isinstance(kwargs[k], dict):
- kwargs.update(kwargs[k])
- del kwargs[k]
-
-        # Rename numa_enabled to numa, according to the API documentation
- if 'numa_enabled' in kwargs:
- kwargs['numa'] = kwargs['numa_enabled']
- del kwargs['numa_enabled']
-
- # PVE api expects strings for the following params
- if 'nameservers' in self.module.params:
- nameservers = self.module.params.pop('nameservers')
- if nameservers:
- kwargs['nameserver'] = ' '.join(nameservers)
- if 'searchdomains' in self.module.params:
- searchdomains = self.module.params.pop('searchdomains')
- if searchdomains:
- kwargs['searchdomain'] = ' '.join(searchdomains)
-
- # VM tags are expected to be valid and presented as a comma/semi-colon delimited string
- if 'tags' in kwargs:
- re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
- for tag in kwargs['tags']:
- if not re_tag.match(tag):
- self.module.fail_json(msg='%s is not a valid tag' % tag)
- kwargs['tags'] = ",".join(kwargs['tags'])
-
-        # -args and skiplock require the root@pam user - they cannot be used with API tokens
- if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None:
- if not update and self.module.params['proxmox_default_behavior'] == 'compatibility':
- kwargs['args'] = vm_args
- elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None:
- kwargs['args'] = self.module.params['args']
- elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None:
- self.module.fail_json(msg='args parameter requires the root@pam user.')
-
- if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None:
- self.module.fail_json(msg='skiplock parameter requires the root@pam user.')
-
- if update:
- return proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None
- elif self.module.params['clone'] is not None:
- for param in valid_clone_params:
- if self.module.params[param] is not None:
- clone_params[param] = self.module.params[param]
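- # convert boolean clone parameters to ints for the API, e.g. {'full': True} becomes {'full': 1}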
- clone_params.update({k: int(v) for k, v in clone_params.items() if isinstance(v, bool)})
- taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
- else:
- taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
-
- if not self.wait_for_task(node, taskid):
- self.module.fail_json(msg='Reached timeout while waiting for VM creation. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def start_vm(self, vm):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.start.post()
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
- def stop_vm(self, vm, force):
- vmid = vm['vmid']
- proxmox_node = self.proxmox_api.nodes(vm['node'])
- taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
- if not self.wait_for_task(vm['node'], taskid):
- self.module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- return False
- return True
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- kvm_args = dict(
- acpi=dict(type='bool'),
- agent=dict(type='bool'),
- args=dict(type='str'),
- autostart=dict(type='bool'),
- balloon=dict(type='int'),
- bios=dict(choices=['seabios', 'ovmf']),
- boot=dict(type='str'),
- bootdisk=dict(type='str'),
- cicustom=dict(type='str'),
- cipassword=dict(type='str', no_log=True),
- citype=dict(type='str', choices=['nocloud', 'configdrive2']),
- ciuser=dict(type='str'),
- clone=dict(type='str'),
- cores=dict(type='int'),
- cpu=dict(type='str'),
- cpulimit=dict(type='int'),
- cpuunits=dict(type='int'),
- delete=dict(type='str'),
- description=dict(type='str'),
- digest=dict(type='str'),
- efidisk0=dict(type='dict',
- options=dict(
- storage=dict(type='str'),
- format=dict(type='str'),
- efitype=dict(type='str', choices=['2m', '4m']),
- pre_enrolled_keys=dict(type='bool'),
- )),
- force=dict(type='bool'),
- format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
- freeze=dict(type='bool'),
- full=dict(type='bool', default=True),
- hostpci=dict(type='dict'),
- hotplug=dict(type='str'),
- hugepages=dict(choices=['any', '2', '1024']),
- ide=dict(type='dict'),
- ipconfig=dict(type='dict'),
- keyboard=dict(type='str'),
- kvm=dict(type='bool'),
- localtime=dict(type='bool'),
- lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
- machine=dict(type='str'),
- memory=dict(type='int'),
- migrate_downtime=dict(type='int'),
- migrate_speed=dict(type='int'),
- name=dict(type='str'),
- nameservers=dict(type='list', elements='str'),
- net=dict(type='dict'),
- newid=dict(type='int'),
- node=dict(),
- numa=dict(type='dict'),
- numa_enabled=dict(type='bool'),
- onboot=dict(type='bool'),
- ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']),
- parallel=dict(type='dict'),
- pool=dict(type='str'),
- protection=dict(type='bool'),
- reboot=dict(type='bool'),
- revert=dict(type='str'),
- sata=dict(type='dict'),
- scsi=dict(type='dict'),
- scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
- serial=dict(type='dict'),
- searchdomains=dict(type='list', elements='str'),
- shares=dict(type='int'),
- skiplock=dict(type='bool'),
- smbios=dict(type='str'),
- snapname=dict(type='str'),
- sockets=dict(type='int'),
- sshkeys=dict(type='str', no_log=False),
- startdate=dict(type='str'),
- startup=dict(),
- state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
- storage=dict(type='str'),
- tablet=dict(type='bool'),
- tags=dict(type='list', elements='str'),
- target=dict(type='str'),
- tdf=dict(type='bool'),
- template=dict(type='bool'),
- timeout=dict(type='int', default=30),
- update=dict(type='bool', default=False),
- vcpus=dict(type='int'),
- vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
- virtio=dict(type='dict'),
- vmid=dict(type='int'),
- watchdog=dict(),
- proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
- )
- module_args.update(kvm_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- required_if=[('state', 'present', ['node'])],
- )
-
- clone = module.params['clone']
- cpu = module.params['cpu']
- cores = module.params['cores']
- delete = module.params['delete']
- memory = module.params['memory']
- name = module.params['name']
- newid = module.params['newid']
- node = module.params['node']
- revert = module.params['revert']
- sockets = module.params['sockets']
- state = module.params['state']
- update = bool(module.params['update'])
- vmid = module.params['vmid']
- validate_certs = module.params['validate_certs']
-
- if module.params['proxmox_default_behavior'] == 'compatibility':
- old_default_values = dict(
- acpi=True,
- autostart=False,
- balloon=0,
- boot='cnd',
- cores=1,
- cpu='kvm64',
- cpuunits=1000,
- format='qcow2',
- kvm=True,
- memory=512,
- ostype='l26',
- sockets=1,
- tablet=False,
- template=False,
- vga='std',
- )
- for param, value in old_default_values.items():
- if module.params[param] is None:
- module.params[param] = value
-
- if module.params['format'] == 'unspecified':
- module.params['format'] = None
-
- proxmox = ProxmoxKvmAnsible(module)
-
- # If vmid is not defined then retrieve its value from the vm name,
- # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
- if not vmid:
- if state == 'present' and not update and not clone and not delete and not revert:
- try:
- vmid = proxmox.get_nextvmid()
- except Exception:
- module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
- else:
- clone_target = clone or name
- vmid = proxmox.get_vmid(clone_target, ignore_missing=True, choose_first_if_multiple=True)
-
- if clone is not None:
- # If newid is not defined then retrieve the next free id from ProxmoxAPI
- if not newid:
- try:
- newid = proxmox.get_nextvmid()
- except Exception:
- module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
-
- # Ensure source VM name exists when cloning
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
-
- # Ensure source VM id exists when cloning
- proxmox.get_vm(vmid)
-
- # Ensure the chosen VM name doesn't already exist when cloning
- existing_vmid = proxmox.get_vmid(name, ignore_missing=True, choose_first_if_multiple=True)
- if existing_vmid:
- module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
-
- # Ensure the chosen VM id doesn't already exist when cloning
- if proxmox.get_vm(newid, ignore_missing=True):
- module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
-
- if delete is not None:
- try:
- proxmox.settings(vmid, node, delete=delete)
- module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
-
- if revert is not None:
- try:
- proxmox.settings(vmid, node, revert=revert)
- module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: Maybe it is not a pending task... '.format(name, vmid) + str(e))
-
- if state == 'present':
- try:
- if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone):
- module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
- elif proxmox.get_vmid(name, ignore_missing=True, choose_first_if_multiple=True) and not (update or clone):
- module.exit_json(changed=False, vmid=proxmox.get_vmid(name, choose_first_if_multiple=True), msg="VM with name <%s> already exists" % name)
- elif not node or not name:
- module.fail_json(msg='node and name are mandatory for creating/updating a VM')
- elif not proxmox.get_node(node):
- module.fail_json(msg="node '%s' does not exist in cluster" % node)
-
- proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update,
- acpi=module.params['acpi'],
- agent=module.params['agent'],
- autostart=module.params['autostart'],
- balloon=module.params['balloon'],
- bios=module.params['bios'],
- boot=module.params['boot'],
- bootdisk=module.params['bootdisk'],
- cicustom=module.params['cicustom'],
- cipassword=module.params['cipassword'],
- citype=module.params['citype'],
- ciuser=module.params['ciuser'],
- cpulimit=module.params['cpulimit'],
- cpuunits=module.params['cpuunits'],
- description=module.params['description'],
- digest=module.params['digest'],
- efidisk0=module.params['efidisk0'],
- force=module.params['force'],
- freeze=module.params['freeze'],
- hostpci=module.params['hostpci'],
- hotplug=module.params['hotplug'],
- hugepages=module.params['hugepages'],
- ide=module.params['ide'],
- ipconfig=module.params['ipconfig'],
- keyboard=module.params['keyboard'],
- kvm=module.params['kvm'],
- localtime=module.params['localtime'],
- lock=module.params['lock'],
- machine=module.params['machine'],
- migrate_downtime=module.params['migrate_downtime'],
- migrate_speed=module.params['migrate_speed'],
- net=module.params['net'],
- numa=module.params['numa'],
- numa_enabled=module.params['numa_enabled'],
- onboot=module.params['onboot'],
- ostype=module.params['ostype'],
- parallel=module.params['parallel'],
- pool=module.params['pool'],
- protection=module.params['protection'],
- reboot=module.params['reboot'],
- sata=module.params['sata'],
- scsi=module.params['scsi'],
- scsihw=module.params['scsihw'],
- serial=module.params['serial'],
- shares=module.params['shares'],
- skiplock=module.params['skiplock'],
- smbios1=module.params['smbios'],
- snapname=module.params['snapname'],
- sshkeys=module.params['sshkeys'],
- startdate=module.params['startdate'],
- startup=module.params['startup'],
- tablet=module.params['tablet'],
- tags=module.params['tags'],
- target=module.params['target'],
- tdf=module.params['tdf'],
- template=module.params['template'],
- vcpus=module.params['vcpus'],
- vga=module.params['vga'],
- virtio=module.params['virtio'],
- watchdog=module.params['watchdog'])
-
- if not clone:
- proxmox.get_vminfo(node, vmid,
- ide=module.params['ide'],
- net=module.params['net'],
- sata=module.params['sata'],
- scsi=module.params['scsi'],
- virtio=module.params['virtio'])
- if update:
- module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
- elif clone is not None:
- module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
- else:
- module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
- except Exception as e:
- if update:
- module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
- elif clone is not None:
- module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
- else:
- module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
-
- elif state == 'started':
- status = {}
- try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
- vm = proxmox.get_vm(vmid)
- status['status'] = vm['status']
- if vm['status'] == 'running':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
-
- if proxmox.start_vm(vm):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'stopped':
- status = {}
- try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- vm = proxmox.get_vm(vmid)
-
- status['status'] = vm['status']
- if vm['status'] == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
-
- if proxmox.stop_vm(vm, force=module.params['force']):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'restarted':
- status = {}
- try:
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
-
- vm = proxmox.get_vm(vmid)
- status['status'] = vm['status']
- if vm['status'] == 'stopped':
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
-
- if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm):
- module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
- except Exception as e:
- module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status)
-
- elif state == 'absent':
- status = {}
- try:
- vm = proxmox.get_vm(vmid, ignore_missing=True)
- if not vm:
- module.exit_json(changed=False, vmid=vmid)
-
- proxmox_node = proxmox.proxmox_api.nodes(vm['node'])
- status['status'] = vm['status']
- if vm['status'] == 'running':
- if module.params['force']:
- proxmox.stop_vm(vm, True)
- else:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=yes." % vmid)
- taskid = proxmox_node.qemu.delete(vmid)
- if not proxmox.wait_for_task(vm['node'], taskid):
- module.fail_json(msg='Reached timeout while waiting for the VM to be removed. Last line in task before timeout: %s' %
- proxmox_node.tasks(taskid).log.get()[:1])
- else:
- module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid)
- except Exception as e:
- module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
-
- elif state == 'current':
- status = {}
- if not vmid:
- module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
- vm = proxmox.get_vm(vmid)
- if not name:
- name = vm['name']
- current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
- status['status'] = current
- if status:
- module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py
deleted file mode 100644
index e83d0dfe..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_nic.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2021, Lammert Hellinga (@Kogelvis)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_nic
-short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster.
-version_added: 3.1.0
-description:
- - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
-author: "Lammert Hellinga (@Kogelvis) "
-options:
- bridge:
- description:
- - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0).
- type: str
- firewall:
- description:
- - Whether this interface should be protected by the firewall.
- type: bool
- default: false
- interface:
- description:
- - Name of the interface, should be C(net[n]) where C(0 ≤ n ≤ 31).
- type: str
- required: true
- link_down:
- description:
- - Whether this interface should be disconnected (like pulling the plug).
- type: bool
- default: false
- mac:
- description:
- - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
- - When not specified this module will keep the MAC address the same when changing an existing interface.
- type: str
- model:
- description:
- - The NIC emulator model.
- type: str
- choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
- 'rtl8139', 'virtio', 'vmxnet3']
- default: virtio
- mtu:
- description:
- - Force MTU, for C(virtio) model only; the setting will be ignored otherwise.
- - Set to C(1) to use the bridge MTU.
- - Value should be C(1 ≤ n ≤ 65520).
- type: int
- name:
- description:
- - Specifies the VM name. Only used on the configuration web interface.
- - Required only for I(state=present).
- type: str
- queues:
- description:
- - Number of packet queues to be used on the device.
- - Value should be C(0 ≤ n ≤ 16).
- type: int
- rate:
- description:
- - Rate limit in MBps (MegaBytes per second) as floating point number.
- type: float
- state:
- description:
- - Indicates desired state of the NIC.
- type: str
- choices: ['present', 'absent']
- default: present
- tag:
- description:
- - VLAN tag to apply to packets on this interface.
- - Value should be C(1 ≤ n ≤ 4094).
- type: int
- trunks:
- description:
- - List of VLAN trunks to pass through this interface.
- type: list
- elements: int
- vmid:
- description:
- - Specifies the instance ID.
- type: int
-extends_documentation_fragment:
- - community.general.proxmox.documentation
-'''
-
-EXAMPLES = '''
-- name: Create NIC net0 targeting the vm by name
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- name: my_vm
- interface: net0
- bridge: vmbr0
- tag: 3
-
-- name: Create NIC net0 targeting the vm by id
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- vmid: 103
- interface: net0
- bridge: vmbr0
- mac: "12:34:56:C0:FF:EE"
- firewall: true
-
-- name: Delete NIC net0 targeting the vm by name
- community.general.proxmox_nic:
- api_user: root@pam
- api_password: secret
- api_host: proxmoxhost
- name: my_vm
- interface: net0
- state: absent
-'''
-
-RETURN = '''
-vmid:
- description: The VM vmid.
- returned: success
- type: int
- sample: 115
-msg:
- description: A short message
- returned: always
- type: str
- sample: "Nic net0 unchanged on VM with vmid 103"
-'''
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxNicAnsible(ProxmoxAnsible):
- def update_nic(self, vmid, interface, model, **kwargs):
- vm = self.get_vm(vmid)
-
- try:
- vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
- except Exception as e:
- self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
-
- if interface in vminfo:
- # Convert the current config to a dictionary
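- # e.g. 'virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0,firewall=1' becomes
- # {'virtio': 'AA:BB:CC:DD:EE:FF', 'bridge': 'vmbr0', 'firewall': '1'}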
- config = vminfo[interface].split(',')
- config.sort()
-
- config_current = {}
-
- for i in config:
- kv = i.split('=')
- try:
- config_current[kv[0]] = kv[1]
- except IndexError:
- config_current[kv[0]] = ''
-
- # determine the current NIC model and MAC address
- models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b',
- 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3']
- current_model = set(models) & set(config_current.keys())
- current_model = current_model.pop()
- current_mac = config_current[current_model]
-
- # build nic config string
- config_provided = "{0}={1}".format(model, current_mac)
- else:
- config_provided = model
-
- if kwargs['mac']:
- config_provided = "{0}={1}".format(model, kwargs['mac'])
-
- if kwargs['bridge']:
- config_provided += ",bridge={0}".format(kwargs['bridge'])
-
- if kwargs['firewall']:
- config_provided += ",firewall=1"
-
- if kwargs['link_down']:
- config_provided += ',link_down=1'
-
- if kwargs['mtu']:
- config_provided += ",mtu={0}".format(kwargs['mtu'])
- if model != 'virtio':
- self.module.warn(
- 'Ignoring MTU for nic {0} on VM with vmid {1}, '
- 'model should be set to \'virtio\'.'.format(interface, vmid))
-
- if kwargs['queues']:
- config_provided += ",queues={0}".format(kwargs['queues'])
-
- if kwargs['rate']:
- config_provided += ",rate={0}".format(kwargs['rate'])
-
- if kwargs['tag']:
- config_provided += ",tag={0}".format(kwargs['tag'])
-
- if kwargs['trunks']:
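- # e.g. trunks=[10, 20] appends ',trunks=10;20'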
- config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks']))
-
- net = {interface: config_provided}
- vm = self.get_vm(vmid)
-
- if ((interface not in vminfo) or (vminfo[interface] != config_provided)):
- if not self.module.check_mode:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net)
- return True
-
- return False
-
- def delete_nic(self, vmid, interface):
- vm = self.get_vm(vmid)
- vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
-
- if interface in vminfo:
- if not self.module.check_mode:
- self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(vmid=vmid, delete=interface)
- return True
-
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- nic_args = dict(
- bridge=dict(type='str'),
- firewall=dict(type='bool', default=False),
- interface=dict(type='str', required=True),
- link_down=dict(type='bool', default=False),
- mac=dict(type='str'),
- model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em',
- 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
- 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'),
- mtu=dict(type='int'),
- name=dict(type='str'),
- queues=dict(type='int'),
- rate=dict(type='float'),
- state=dict(default='present', choices=['present', 'absent']),
- tag=dict(type='int'),
- trunks=dict(type='list', elements='int'),
- vmid=dict(type='int'),
- )
- module_args.update(nic_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
- supports_check_mode=True,
- )
-
- proxmox = ProxmoxNicAnsible(module)
-
- interface = module.params['interface']
- model = module.params['model']
- name = module.params['name']
- state = module.params['state']
- vmid = module.params['vmid']
-
- # If vmid is not defined then retrieve its value from the vm name,
- if not vmid:
- vmid = proxmox.get_vmid(name)
-
- # Ensure VM id exists
- proxmox.get_vm(vmid)
-
- if state == 'present':
- try:
- if proxmox.update_nic(vmid, interface, model,
- bridge=module.params['bridge'],
- firewall=module.params['firewall'],
- link_down=module.params['link_down'],
- mac=module.params['mac'],
- mtu=module.params['mtu'],
- queues=module.params['queues'],
- rate=module.params['rate'],
- tag=module.params['tag'],
- trunks=module.params['trunks']):
- module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid))
- else:
- module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
- elif state == 'absent':
- try:
- if proxmox.delete_nic(vmid, interface):
- module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid))
- else:
- module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid))
- except Exception as e:
- module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py
deleted file mode 100644
index cf570bd1..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_snap.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2020, Jeffrey van Pelt (@Thulium-Drake)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: proxmox_snap
-short_description: Snapshot management of instances in Proxmox VE cluster
-version_added: 2.0.0
-description:
- - Allows you to create/delete snapshots from instances in Proxmox VE cluster.
- - Supports both KVM and LXC; OpenVZ has not been tested, as it is no longer supported on Proxmox VE.
-options:
- hostname:
- description:
- - The instance name.
- type: str
- vmid:
- description:
- - The instance id.
- - If not set, will be fetched from ProxmoxAPI based on the hostname.
- type: str
- state:
- description:
- - Indicate desired state of the instance snapshot.
- choices: ['present', 'absent']
- default: present
- type: str
- force:
- description:
- - Force removal from the config file, even if removing disk snapshots fails.
- default: no
- type: bool
- vmstate:
- description:
- - Snapshot includes RAM.
- default: no
- type: bool
- description:
- description:
- - Specify the description for the snapshot. Only used on the configuration web interface.
- - This is saved as a comment inside the configuration file.
- type: str
- timeout:
- description:
- - Timeout for operations.
- default: 30
- type: int
- snapname:
- description:
- - Name of the snapshot that has to be created.
- default: 'ansible_snap'
- type: str
-
-notes:
- - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
- - Supports C(check_mode).
-requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
-author: Jeffrey van Pelt (@Thulium-Drake)
-extends_documentation_fragment:
- - community.general.proxmox.documentation
-'''
-
-EXAMPLES = r'''
-- name: Create new container snapshot
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: present
- snapname: pre-updates
-
-- name: Remove container snapshot
- community.general.proxmox_snap:
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- vmid: 100
- state: absent
- snapname: pre-updates
-'''
-
-RETURN = r'''#'''
-
-import time
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible, HAS_PROXMOXER, PROXMOXER_IMP_ERR)
-
-
-class ProxmoxSnapAnsible(ProxmoxAnsible):
- def snapshot(self, vm, vmid):
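- # resolves to /nodes/<node>/qemu/<vmid>/snapshot or /nodes/<node>/lxc/<vmid>/snapshot, depending on vm['type']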
- return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
-
- def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate):
- if self.module.check_mode:
- return True
-
- if vm['type'] == 'lxc':
- taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
- else:
- taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
- while timeout:
- if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
- self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for the VM snapshot to be created. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def snapshot_remove(self, vm, vmid, timeout, snapname, force):
- if self.module.check_mode:
- return True
-
- taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
- while timeout:
- if (self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['status'] == 'stopped' and
- self.proxmox_api.nodes(vm['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
- return True
- timeout -= 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for the VM snapshot to be removed. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- snap_args = dict(
- vmid=dict(required=False),
- hostname=dict(),
- timeout=dict(type='int', default=30),
- state=dict(default='present', choices=['present', 'absent']),
- description=dict(type='str'),
- snapname=dict(type='str', default='ansible_snap'),
- force=dict(type='bool', default=False),
- vmstate=dict(type='bool', default=False),
- )
- module_args.update(snap_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
-
- proxmox = ProxmoxSnapAnsible(module)
-
- state = module.params['state']
- vmid = module.params['vmid']
- hostname = module.params['hostname']
- description = module.params['description']
- snapname = module.params['snapname']
- timeout = module.params['timeout']
- force = module.params['force']
- vmstate = module.params['vmstate']
-
- # If hostname is set get the VM id from ProxmoxAPI
- if not vmid and hostname:
- vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True)
- elif not vmid:
- module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
-
- vm = proxmox.get_vm(vmid)
-
- if state == 'present':
- try:
- for i in proxmox.snapshot(vm, vmid).get():
- if i['name'] == snapname:
- module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
-
- if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate):
- if module.check_mode:
- module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
- else:
- module.exit_json(changed=True, msg="Snapshot %s created" % snapname)
-
- except Exception as e:
- module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
- elif state == 'absent':
- try:
- snap_exist = False
-
- for i in proxmox.snapshot(vm, vmid).get():
- if i['name'] == snapname:
- snap_exist = True
- break
-
- if not snap_exist:
- module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
- else:
- if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force):
- if module.check_mode:
- module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname)
- else:
- module.exit_json(changed=True, msg="Snapshot %s removed" % snapname)
-
- except Exception as e:
- module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py
deleted file mode 100644
index 265b6fba..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_storage_info.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern (@tleguern)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_storage_info
-short_description: Retrieve information about one or more Proxmox VE storages
-version_added: 2.2.0
-description:
- - Retrieve information about one or more Proxmox VE storages.
-options:
- storage:
- description:
- - Only return information on a specific storage.
- aliases: ['name']
- type: str
- type:
- description:
- - Filter on a specific storage type.
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-notes:
- - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
-'''
-
-
-EXAMPLES = '''
-- name: List existing storages
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_storages
-
-- name: List NFS storages only
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- type: nfs
- register: proxmox_storages_nfs
-
-- name: Retrieve information about the lvm2 storage
- community.general.proxmox_storage_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- storage: lvm2
- register: proxmox_storage_lvm
-'''
-
-
-RETURN = '''
-proxmox_storages:
- description: List of storage pools.
- returned: on success
- type: list
- elements: dict
- contains:
- content:
- description: Proxmox content types available in this storage
- returned: on success
- type: list
- elements: str
- digest:
- description: Storage's digest
- returned: on success
- type: str
- nodes:
- description: List of nodes associated to this storage
- returned: on success, if storage is not local
- type: list
- elements: str
- path:
- description: Physical path to this storage
- returned: on success
- type: str
- prune-backups:
- description: Backup retention options
- returned: on success
- type: list
- elements: dict
- shared:
- description: Is this storage shared
- returned: on success
- type: bool
- storage:
- description: Storage name
- returned: on success
- type: str
- type:
- description: Storage type
- returned: on success
- type: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
-
-
-class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
- def get_storage(self, storage):
- try:
- storage = self.proxmox_api.storage.get(storage)
- except Exception:
- self.module.fail_json(msg="Storage '%s' does not exist" % storage)
- return ProxmoxStorage(storage)
-
- def get_storages(self, type=None):
- storages = self.proxmox_api.storage.get(type=type)
- storages = [ProxmoxStorage(storage) for storage in storages]
- return storages
-
-
-class ProxmoxStorage:
- def __init__(self, storage):
- self.storage = storage
- # Convert proxmox representation of lists, dicts and boolean for easier
- # manipulation within ansible.
- if 'shared' in self.storage:
- self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared'])
- if 'content' in self.storage:
- self.storage['content'] = self.storage['content'].split(',')
- if 'nodes' in self.storage:
- self.storage['nodes'] = self.storage['nodes'].split(',')
- if 'prune-backups' in storage:
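- # e.g. 'keep-last=3,keep-weekly=2' becomes {'keep-last': '3', 'keep-weekly': '2'}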
- options = storage['prune-backups'].split(',')
- self.storage['prune-backups'] = dict()
- for option in options:
- k, v = option.split('=')
- self.storage['prune-backups'][k] = v
-
-
-def proxmox_storage_info_argument_spec():
- return dict(
- storage=dict(type='str', aliases=['name']),
- type=dict(type='str'),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- storage_info_args = proxmox_storage_info_argument_spec()
- module_args.update(storage_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- mutually_exclusive=[('storage', 'type')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxStorageInfoAnsible(module)
- storage = module.params['storage']
- storagetype = module.params['type']
-
- if storage:
- storages = [proxmox.get_storage(storage)]
- else:
- storages = proxmox.get_storages(type=storagetype)
- result['proxmox_storages'] = [storage.storage for storage in storages]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py
deleted file mode 100644
index ff3bf686..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_tasks_info.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2021, Andreas Botzner (@paginabianca)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: proxmox_tasks_info
-short_description: Retrieve information about one or more Proxmox VE tasks
-version_added: 3.8.0
-description:
- - Retrieve information about one or more Proxmox VE tasks.
-author: 'Andreas Botzner (@paginabianca) '
-options:
- node:
- description:
- - Node from which to retrieve tasks.
- required: true
- type: str
- task:
- description:
- - Return specific task.
- aliases: ['upid', 'name']
- type: str
-extends_documentation_fragment:
- - community.general.proxmox.documentation
-'''
-
-
-EXAMPLES = '''
-- name: List tasks on node01
- community.general.proxmox_tasks_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- api_token_id: '{{ token_id | default(omit) }}'
- api_token_secret: '{{ token_secret | default(omit) }}'
- node: node01
- register: result
-
-- name: Retrieve information about specific tasks on node01
- community.general.proxmox_tasks_info:
- api_host: proxmoxhost
- api_user: root@pam
- api_password: '{{ password | default(omit) }}'
- api_token_id: '{{ token_id | default(omit) }}'
- api_token_secret: '{{ token_secret | default(omit) }}'
- task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
- node: node01
- register: proxmox_tasks
-'''
-
-
-RETURN = '''
-proxmox_tasks:
- description: List of tasks.
- returned: on success
- type: list
- elements: dict
- contains:
- id:
- description: ID of the task.
- returned: on success
- type: str
- node:
- description: Node name.
- returned: on success
- type: str
- pid:
- description: PID of the task.
- returned: on success
- type: int
- pstart:
- description: pstart of the task.
- returned: on success
- type: int
- starttime:
- description: Starting time of the task.
- returned: on success
- type: int
- type:
- description: Type of the task.
- returned: on success
- type: str
- upid:
- description: UPID of the task.
- returned: on success
- type: str
- user:
- description: User that owns the task.
- returned: on success
- type: str
- endtime:
- description: Endtime of the task.
- returned: on success, can be absent
- type: int
- status:
- description: Status of the task.
- returned: on success, can be absent
- type: str
- failed:
- description: If the task failed.
- returned: when status is defined
- type: bool
-msg:
- description: Short message.
- returned: on failure
- type: str
- sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
-'''
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
- def get_task(self, upid, node):
- tasks = self.get_tasks(node)
- for task in tasks:
- if task.info['upid'] == upid:
- return [task]
-
- def get_tasks(self, node):
- tasks = self.proxmox_api.nodes(node).tasks.get()
- return [ProxmoxTask(task) for task in tasks]
-
-
-class ProxmoxTask:
- def __init__(self, task):
- self.info = dict()
- for k, v in task.items():
- if k == 'status' and isinstance(v, str):
- self.info[k] = v
- if v != 'OK':
- self.info['failed'] = True
- else:
- self.info[k] = v
-
-
-def proxmox_task_info_argument_spec():
- return dict(
- task=dict(type='str', aliases=['upid', 'name'], required=False),
- node=dict(type='str', required=True),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- task_info_args = proxmox_task_info_argument_spec()
- module_args.update(task_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret'),
- ('api_user', 'api_password')],
- required_one_of=[('api_password', 'api_token_id')],
- supports_check_mode=True)
- result = dict(changed=False)
-
- proxmox = ProxmoxTaskInfoAnsible(module)
- upid = module.params['task']
- node = module.params['node']
- if upid:
- tasks = proxmox.get_task(upid=upid, node=node)
- else:
- tasks = proxmox.get_tasks(node=node)
- if tasks is not None:
- result['proxmox_tasks'] = [task.info for task in tasks]
- module.exit_json(**result)
- else:
- result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
- upid, node)
- module.fail_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
deleted file mode 100644
index 32ff8e7e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_template.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_template
-short_description: Management of OS templates in Proxmox VE cluster
-description:
- - Allows you to upload/delete templates in a Proxmox VE cluster.
-options:
- node:
- description:
- - Proxmox VE node on which to operate.
- type: str
- src:
- description:
- - Path of the template file to upload.
- - Required only for C(state=present).
- type: path
- template:
- description:
- - the template name
- - Required for state C(absent) to delete a template.
- - Required for state C(present) to download an appliance container template (pveam).
- type: str
- content_type:
- description:
- - content type
- - required only for C(state=present)
- type: str
- default: 'vztmpl'
- choices: ['vztmpl', 'iso']
- storage:
- description:
- - target storage
- type: str
- default: 'local'
- timeout:
- description:
- - timeout for operations
- type: int
- default: 30
- force:
- description:
- - Can be used only with C(state=present); an existing template will be overwritten.
- type: bool
- default: 'no'
- state:
- description:
- - Indicate desired state of the template
- type: str
- choices: ['present', 'absent']
- default: present
-notes:
- - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
-author: Sergei Antipov (@UnderGreen)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-EXAMPLES = '''
-- name: Upload new openvz template with minimal options
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: >
- Upload new openvz template with minimal options, using the environment
- PROXMOX_PASSWORD variable (you should export it beforehand)
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_host: node1
- src: ~/ubuntu-14.04-x86_64.tar.gz
-
-- name: Upload new openvz template with all options and force overwrite
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- storage: local
- content_type: vztmpl
- src: ~/ubuntu-14.04-x86_64.tar.gz
- force: yes
-
-- name: Delete template with minimal options
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- template: ubuntu-14.04-x86_64.tar.gz
- state: absent
-
-- name: Download proxmox appliance container template
- community.general.proxmox_template:
- node: uk-mc02
- api_user: root@pam
- api_password: 1q2w3e
- api_host: node1
- storage: local
- content_type: vztmpl
- template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
-'''
-
-import os
-import time
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
-
-
-class ProxmoxTemplateAnsible(ProxmoxAnsible):
- def get_template(self, node, storage, content_type, template):
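- # a template's volid has the form '<storage>:<content_type>/<template>',
- # e.g. 'local:vztmpl/ubuntu-20.04-standard_20.04-1_amd64.tar.gz'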
- return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()
- if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
-
- def task_status(self, node, taskid, timeout):
- """
- Check the task status and wait until the task is completed or the timeout is reached.
- """
- while timeout:
- task_status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
- if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
- return True
- timeout = timeout - 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for the template to be uploaded/downloaded. Last line in task before timeout: %s' %
- self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1])
-
- time.sleep(1)
- return False
-
- def upload_template(self, node, storage, content_type, realpath, timeout):
- taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
- return self.task_status(node, taskid, timeout)
-
- def download_template(self, node, storage, template, timeout):
- taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
- return self.task_status(node, taskid, timeout)
-
- def delete_template(self, node, storage, content_type, template, timeout):
- volid = '%s:%s/%s' % (storage, content_type, template)
- self.proxmox_api.nodes(node).storage(storage).content.delete(volid)
- while timeout:
- if not self.get_template(node, storage, content_type, template):
- return True
- timeout = timeout - 1
- if timeout == 0:
- self.module.fail_json(msg='Reached timeout while waiting for the template to be deleted.')
-
- time.sleep(1)
- return False
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- template_args = dict(
- node=dict(),
- src=dict(type='path'),
- template=dict(),
- content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
- storage=dict(default='local'),
- timeout=dict(type='int', default=30),
- force=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent']),
- )
- module_args.update(template_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_together=[('api_token_id', 'api_token_secret')],
- required_one_of=[('api_password', 'api_token_id')],
- required_if=[('state', 'absent', ['template'])]
- )
-
- proxmox = ProxmoxTemplateAnsible(module)
-
- state = module.params['state']
- node = module.params['node']
- storage = module.params['storage']
- timeout = module.params['timeout']
-
- if state == 'present':
- try:
- content_type = module.params['content_type']
- src = module.params['src']
-
- # download appliance template
- if content_type == 'vztmpl' and not src:
- template = module.params['template']
-
- if not template:
- module.fail_json(msg='template param for downloading appliance template is mandatory')
-
- if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
-
- if proxmox.download_template(node, storage, template, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
-
- template = os.path.basename(src)
- if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
- elif not src:
- module.fail_json(msg='src param for uploading a template file is mandatory')
- elif not (os.path.exists(src) and os.path.isfile(src)):
- module.fail_json(msg='template file at path %s does not exist' % src)
-
- if proxmox.upload_template(node, storage, content_type, src, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
- except Exception as e:
- module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
-
- elif state == 'absent':
- try:
- content_type = module.params['content_type']
- template = module.params['template']
-
- if not proxmox.get_template(node, storage, content_type, template):
- module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))
-
- if proxmox.delete_template(node, storage, content_type, template, timeout):
- module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
- except Exception as e:
- module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
deleted file mode 100644
index d0ee365b..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/proxmox_user_info.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Tristan Le Guern
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: proxmox_user_info
-short_description: Retrieve information about one or more Proxmox VE users
-version_added: 1.3.0
-description:
- - Retrieve information about one or more Proxmox VE users
-options:
- domain:
- description:
- - Restrict results to a specific authentication realm.
- aliases: ['realm']
- type: str
- user:
- description:
- - Restrict results to a specific user.
- aliases: ['name']
- type: str
- userid:
- description:
- - Restrict results to a specific user ID, which is a concatenation of a user and domain parts.
- type: str
-author: Tristan Le Guern (@tleguern)
-extends_documentation_fragment: community.general.proxmox.documentation
-'''
-
-EXAMPLES = '''
-- name: List existing users
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- register: proxmox_users
-
-- name: List existing users in the pve authentication realm
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- domain: pve
- register: proxmox_users_pve
-
-- name: Retrieve information about admin@pve
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- userid: admin@pve
- register: proxmox_user_admin
-
-- name: Alternative way to retrieve information about admin@pve
- community.general.proxmox_user_info:
- api_host: helldorado
- api_user: root@pam
- api_password: "{{ password | default(omit) }}"
- api_token_id: "{{ token_id | default(omit) }}"
- api_token_secret: "{{ token_secret | default(omit) }}"
- user: admin
- domain: pve
- register: proxmox_user_admin
-'''
-
-
-RETURN = '''
-proxmox_users:
- description: List of users.
- returned: always, but can be empty
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the user.
- returned: on success
- type: str
- domain:
- description: User's authentication realm, also the right part of the user ID.
- returned: on success
- type: str
- email:
- description: User's email address.
- returned: on success
- type: str
- enabled:
- description: User's account state.
- returned: on success
- type: bool
- expire:
- description: Expiration date in seconds since EPOCH. Zero means no expiration.
- returned: on success
- type: int
- firstname:
- description: User's first name.
- returned: on success
- type: str
- groups:
- description: List of groups which the user is a member of.
- returned: on success
- type: list
- elements: str
- keys:
- description: User's two factor authentication keys.
- returned: on success
- type: str
- lastname:
- description: User's last name.
- returned: on success
- type: str
- tokens:
- description: List of API tokens associated to the user.
- returned: on success
- type: list
- elements: dict
- contains:
- comment:
- description: Short description of the token.
- returned: on success
- type: str
- expire:
- description: Expiration date in seconds since EPOCH. Zero means no expiration.
- returned: on success
- type: int
- privsep:
- description: Describe if the API token is further restricted with ACLs or is fully privileged.
- returned: on success
- type: bool
- tokenid:
- description: Token name.
- returned: on success
- type: str
- user:
- description: User's login name, also the left part of the user ID.
- returned: on success
- type: str
- userid:
- description: Proxmox user ID, represented as user@realm.
- returned: on success
- type: str
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.proxmox import (
- proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
-
-
-class ProxmoxUserInfoAnsible(ProxmoxAnsible):
- def get_user(self, userid):
- try:
- user = self.proxmox_api.access.users.get(userid)
- except Exception:
- self.module.fail_json(msg="User '%s' does not exist" % userid)
- user['userid'] = userid
- return ProxmoxUser(user)
-
- def get_users(self, domain=None):
- users = self.proxmox_api.access.users.get(full=1)
- users = [ProxmoxUser(user) for user in users]
- if domain:
- return [user for user in users if user.user['domain'] == domain]
- return users
-
-
-class ProxmoxUser:
- def __init__(self, user):
- self.user = dict()
- # Data representation is not the same depending on API calls
- for k, v in user.items():
- if k == 'enable':
- self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
- elif k == 'userid':
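- # e.g. userid 'admin@pve' yields user 'admin' and domain 'pve'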
- self.user['user'] = user['userid'].split('@')[0]
- self.user['domain'] = user['userid'].split('@')[1]
- self.user[k] = v
- elif k in ['groups', 'tokens'] and (v == '' or v is None):
- self.user[k] = []
- elif k == 'groups' and isinstance(v, str):
- self.user['groups'] = v.split(',')
- elif k == 'tokens' and isinstance(v, list):
- for token in v:
- if 'privsep' in token:
- token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
- self.user['tokens'] = v
- elif k == 'tokens' and isinstance(v, dict):
- self.user['tokens'] = list()
- for tokenid, tokenvalues in v.items():
- t = tokenvalues
- t['tokenid'] = tokenid
- if 'privsep' in tokenvalues:
- t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
- self.user['tokens'].append(t)
- else:
- self.user[k] = v
-
-
-def proxmox_user_info_argument_spec():
- return dict(
- domain=dict(type='str', aliases=['realm']),
- user=dict(type='str', aliases=['name']),
- userid=dict(type='str'),
- )
-
-
-def main():
- module_args = proxmox_auth_argument_spec()
- user_info_args = proxmox_user_info_argument_spec()
- module_args.update(user_info_args)
-
- module = AnsibleModule(
- argument_spec=module_args,
- required_one_of=[('api_password', 'api_token_id')],
- required_together=[('api_token_id', 'api_token_secret')],
- mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
- supports_check_mode=True
- )
- result = dict(
- changed=False
- )
-
- proxmox = ProxmoxUserInfoAnsible(module)
- domain = module.params['domain']
- user = module.params['user']
- if user and domain:
- userid = user + '@' + domain
- else:
- userid = module.params['userid']
-
- if userid:
- users = [proxmox.get_user(userid=userid)]
- else:
- users = proxmox.get_users(domain=domain)
- result['proxmox_users'] = [user.user for user in users]
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
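
The ProxmoxUser class above exists because the Proxmox API reports token data in two shapes depending on the endpoint: a list of token dicts, or a dict keyed by token ID. A minimal standalone sketch of that normalization follows; the helper name and sample payload are hypothetical, and bool(int(...)) is assumed to match what proxmox_to_ansible_bool does.

def normalize_tokens(tokens):
    """Return a list of token dicts with 'tokenid' set and 'privsep' as a bool."""
    if not tokens:
        return []
    if isinstance(tokens, dict):  # {tokenid: {...}} variant of the API response
        tokens = [dict(values, tokenid=tokenid) for tokenid, values in tokens.items()]
    for token in tokens:
        if 'privsep' in token:
            token['privsep'] = bool(int(token['privsep']))  # API reports 0/1
    return tokens

print(normalize_tokens({'monitoring': {'privsep': 1, 'comment': 'read-only'}}))
# [{'privsep': True, 'comment': 'read-only', 'tokenid': 'monitoring'}]
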
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py b/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
deleted file mode 100644
index 77b40248..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/rhevm.py
+++ /dev/null
@@ -1,1498 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Timothy Vandenbrande
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: rhevm
-short_description: RHEV/oVirt automation
-description:
- - This module only supports oVirt/RHEV version 3.
- - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
- - Allows you to create/remove/update or power-manage virtual machines on a RHEV/oVirt platform.
-requirements:
- - ovirtsdk
-author:
-- Timothy Vandenbrande (@TimothyVandenbrande)
-options:
- user:
- description:
- - The user to authenticate with.
- type: str
- default: admin@internal
- password:
- description:
- - The password for user authentication.
- type: str
- required: true
- server:
- description:
- - The name/IP of your RHEV-m/oVirt instance.
- type: str
- default: 127.0.0.1
- port:
- description:
- - The port on which the API is reachable.
- type: int
- default: 443
- insecure_api:
- description:
- - A boolean switch to make a secure or insecure connection to the server.
- type: bool
- default: no
- name:
- description:
- - The name of the VM.
- type: str
- cluster:
- description:
- - The RHEV/oVirt cluster in which you want your VM to start.
- type: str
- datacenter:
- description:
- - The RHEV/oVirt datacenter in which you want your VM to start.
- type: str
- default: Default
- state:
- description:
- - This serves to create/remove/update or power-manage your VM.
- type: str
- choices: [ absent, cd, down, info, ping, present, restarted, up ]
- default: present
- image:
- description:
- - The template to use for the VM.
- type: str
- type:
- description:
- - To define if the VM is a server or desktop.
- type: str
- choices: [ desktop, host, server ]
- default: server
- vmhost:
- description:
- - The host you wish your VM to run on.
- type: str
- vmcpu:
- description:
- - The number of CPUs you want in your VM.
- type: int
- default: 2
- cpu_share:
- description:
- - This parameter is used to configure the CPU share.
- type: int
- default: 0
- vmmem:
- description:
- - The amount of memory you want your VM to use (in GB).
- type: int
- default: 1
- osver:
- description:
- - The operating system option in RHEV/oVirt.
- type: str
- default: rhel_6x64
- mempol:
- description:
- - The minimum amount of memory you wish to reserve for this system.
- type: int
- default: 1
- vm_ha:
- description:
- - To make your VM High Available.
- type: bool
- default: yes
- disks:
- description:
- - This option uses complex arguments and is a list of disks with the options name, size and domain.
- type: list
- elements: str
- ifaces:
- description:
- - This option uses complex arguments and is a list of interfaces with the options name and vlan.
- type: list
- elements: str
- aliases: [ interfaces, nics ]
- boot_order:
- description:
- - This option uses complex arguments and is a list of items that specify the boot order.
- type: list
- elements: str
- default: [ hd, network ]
- del_prot:
- description:
- - This option sets the delete protection checkbox.
- type: bool
- default: yes
- cd_drive:
- description:
- - The CD you wish to have mounted on the VM when I(state = 'CD').
- type: str
- timeout:
- description:
- - The timeout you wish to define for power actions.
- - When I(state = 'up').
- - When I(state = 'down').
- - When I(state = 'restarted').
- type: int
-'''
-
-RETURN = r'''
-vm:
- description: Returns all of the VMs variables and execution.
- returned: always
- type: dict
- sample: '{
- "boot_order": [
- "hd",
- "network"
- ],
- "changed": true,
- "changes": [
- "Delete Protection"
- ],
- "cluster": "C1",
- "cpu_share": "0",
- "created": false,
- "datacenter": "Default",
- "del_prot": true,
- "disks": [
- {
- "domain": "ssd-san",
- "name": "OS",
- "size": 40
- }
- ],
- "eth0": "00:00:5E:00:53:00",
- "eth1": "00:00:5E:00:53:01",
- "eth2": "00:00:5E:00:53:02",
- "exists": true,
- "failed": false,
- "ifaces": [
- {
- "name": "eth0",
- "vlan": "Management"
- },
- {
- "name": "eth1",
- "vlan": "Internal"
- },
- {
- "name": "eth2",
- "vlan": "External"
- }
- ],
- "image": false,
- "mempol": "0",
- "msg": [
- "VM exists",
- "cpu_share was already set to 0",
- "VM high availability was already set to True",
- "The boot order has already been set",
- "VM delete protection has been set to True",
- "Disk web2_Disk0_OS already exists",
- "The VM starting host was already set to host416"
- ],
- "name": "web2",
- "type": "server",
- "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
- "vm_ha": true,
- "vmcpu": "4",
- "vmhost": "host416",
- "vmmem": "16"
- }'
-'''
-
-EXAMPLES = r'''
-- name: Basic get info from VM
- community.general.rhevm:
- server: rhevm01
- user: '{{ rhev.admin.name }}'
- password: '{{ rhev.admin.pass }}'
- name: demo
- state: info
-
-- name: Basic create example from image
- community.general.rhevm:
- server: rhevm01
- user: '{{ rhev.admin.name }}'
- password: '{{ rhev.admin.pass }}'
- name: demo
- cluster: centos
- image: centos7_x64
- state: present
-
-- name: Power management
- community.general.rhevm:
- server: rhevm01
- user: '{{ rhev.admin.name }}'
- password: '{{ rhev.admin.pass }}'
- cluster: RH
- name: uptime_server
- image: centos7_x64
- state: down
-
-- name: Multi disk, multi nic create example
- community.general.rhevm:
- server: rhevm01
- user: '{{ rhev.admin.name }}'
- password: '{{ rhev.admin.pass }}'
- cluster: RH
- name: server007
- type: server
- vmcpu: 4
- vmmem: 2
- ifaces:
- - name: eth0
- vlan: vlan2202
- - name: eth1
- vlan: vlan36
- - name: eth2
- vlan: vlan38
- - name: eth3
- vlan: vlan2202
- disks:
- - name: root
- size: 10
- domain: ssd-san
- - name: swap
- size: 10
- domain: 15kiscsi-san
- - name: opt
- size: 10
- domain: 15kiscsi-san
- - name: var
- size: 10
- domain: 10kiscsi-san
- - name: home
- size: 10
- domain: sata-san
- boot_order:
- - network
- - hd
- state: present
-
-- name: Add a CD to the disk cd_drive
- community.general.rhevm:
- user: '{{ rhev.admin.name }}'
- password: '{{ rhev.admin.pass }}'
- name: server007
- cd_drive: rhev-tools-setup.iso
- state: cd
-
-- name: New host deployment + host network configuration
- community.general.rhevm:
- password: '{{ rhevm.admin.pass }}'
- name: ovirt_node007
- type: host
- cluster: rhevm01
- ifaces:
- - name: em1
- - name: em2
- - name: p3p1
- ip: 172.31.224.200
- netmask: 255.255.254.0
- - name: p3p2
- ip: 172.31.225.200
- netmask: 255.255.254.0
- - name: bond0
- bond:
- - em1
- - em2
- network: rhevm
- ip: 172.31.222.200
- netmask: 255.255.255.0
- management: yes
- - name: bond0.36
- network: vlan36
- ip: 10.2.36.200
- netmask: 255.255.254.0
- gateway: 10.2.36.254
- - name: bond0.2202
- network: vlan2202
- - name: bond0.38
- network: vlan38
- state: present
-'''
-
-import time
-
-try:
- from ovirtsdk.api import API
- from ovirtsdk.xml import params
- HAS_SDK = True
-except ImportError:
- HAS_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-RHEV_FAILED = 1
-RHEV_SUCCESS = 0
-RHEV_UNAVAILABLE = 2
-
-RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
-STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
-
-msg = []
-changed = False
-failed = False
-
-
-class RHEVConn(object):
- 'Connection to RHEV-M'
-
- def __init__(self, module):
- self.module = module
-
- user = module.params.get('user')
- password = module.params.get('password')
- server = module.params.get('server')
- port = module.params.get('port')
- insecure_api = module.params.get('insecure_api')
-
- url = "https://%s:%s" % (server, port)
-
- try:
- api = API(url=url, username=user, password=password, insecure=str(insecure_api))
- api.test()
- self.conn = api
- except Exception:
- raise Exception("Failed to connect to RHEV-M.")
-
- def __del__(self):
- self.conn.disconnect()
-
- def createVMimage(self, name, cluster, template):
- try:
- vmparams = params.VM(
- name=name,
- cluster=self.conn.clusters.get(name=cluster),
- template=self.conn.templates.get(name=template),
- disks=params.Disks(clone=True)
- )
- self.conn.vms.add(vmparams)
- setMsg("VM is created")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to create VM")
- setMsg(str(e))
- setFailed()
- return False
-
- def createVM(self, name, cluster, os, actiontype):
- try:
- vmparams = params.VM(
- name=name,
- cluster=self.conn.clusters.get(name=cluster),
- os=params.OperatingSystem(type_=os),
- template=self.conn.templates.get(name="Blank"),
- type_=actiontype
- )
- self.conn.vms.add(vmparams)
- setMsg("VM is created")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to create VM")
- setMsg(str(e))
- setFailed()
- return False
-
- def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
- VM = self.get_VM(vmname)
-
- newdisk = params.Disk(
- name=diskname,
- size=1024 * 1024 * 1024 * int(disksize),
- wipe_after_delete=True,
- sparse=diskallocationtype,
- interface=diskinterface,
- format=diskformat,
- bootable=diskboot,
- storage_domains=params.StorageDomains(
- storage_domain=[self.get_domain(diskdomain)]
- )
- )
-
- try:
- VM.disks.add(newdisk)
- VM.update()
- setMsg("Successfully added disk " + diskname)
- setChanged()
- except Exception as e:
- setFailed()
- setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
- setMsg(str(e))
- return False
-
- try:
- currentdisk = VM.disks.get(name=diskname)
- attempt = 1
- while currentdisk.status.state != 'ok':
- currentdisk = VM.disks.get(name=diskname)
- if attempt == 100:
- setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
- raise Exception()
- else:
- attempt += 1
- time.sleep(2)
- setMsg("The disk " + diskname + " is ready.")
- except Exception as e:
- setFailed()
- setMsg("Error getting the state of " + diskname + ".")
- setMsg(str(e))
- return False
- return True
-
- def createNIC(self, vmname, nicname, vlan, interface):
- VM = self.get_VM(vmname)
- CLUSTER = self.get_cluster_byid(VM.cluster.id)
- DC = self.get_DC_byid(CLUSTER.data_center.id)
- newnic = params.NIC(
- name=nicname,
- network=DC.networks.get(name=vlan),
- interface=interface
- )
-
- try:
- VM.nics.add(newnic)
- VM.update()
- setMsg("Successfully added iface " + nicname)
- setChanged()
- except Exception as e:
- setFailed()
- setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
- setMsg(str(e))
- return False
-
- try:
- currentnic = VM.nics.get(name=nicname)
- attempt = 1
- while currentnic.active is not True:
- currentnic = VM.nics.get(name=nicname)
- if attempt == 100:
- setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
- raise Exception()
- else:
- attempt += 1
- time.sleep(2)
- setMsg("The iface " + nicname + " is ready.")
- except Exception as e:
- setFailed()
- setMsg("Error getting the state of " + nicname + ".")
- setMsg(str(e))
- return False
- return True
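
createDisk() and createNIC() above share one polling idiom: re-read the resource, give up after 100 attempts, and sleep two seconds between tries. A minimal generic sketch of that pattern, assuming a hypothetical wait_until() helper that is not part of this module:

import time

def wait_until(check, attempts=100, delay=2):
    # Poll check() until it returns a truthy value; fail after `attempts` tries.
    for dummy in range(attempts):
        if check():
            return True
        time.sleep(delay)
    raise Exception("resource did not become ready in time")

# Usage mirroring the loop in createDisk():
# wait_until(lambda: VM.disks.get(name=diskname).status.state == 'ok')
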
-
- def get_DC(self, dc_name):
- return self.conn.datacenters.get(name=dc_name)
-
- def get_DC_byid(self, dc_id):
- return self.conn.datacenters.get(id=dc_id)
-
- def get_VM(self, vm_name):
- return self.conn.vms.get(name=vm_name)
-
- def get_cluster_byid(self, cluster_id):
- return self.conn.clusters.get(id=cluster_id)
-
- def get_cluster(self, cluster_name):
- return self.conn.clusters.get(name=cluster_name)
-
- def get_domain_byid(self, dom_id):
- return self.conn.storagedomains.get(id=dom_id)
-
- def get_domain(self, domain_name):
- return self.conn.storagedomains.get(name=domain_name)
-
- def get_disk(self, disk):
- return self.conn.disks.get(disk)
-
- def get_network(self, dc_name, network_name):
- return self.get_DC(dc_name).networks.get(network_name)
-
- def get_network_byid(self, network_id):
- return self.conn.networks.get(id=network_id)
-
- def get_NIC(self, vm_name, nic_name):
- return self.get_VM(vm_name).nics.get(nic_name)
-
- def get_Host(self, host_name):
- return self.conn.hosts.get(name=host_name)
-
- def get_Host_byid(self, host_id):
- return self.conn.hosts.get(id=host_id)
-
- def set_Memory(self, name, memory):
- VM = self.get_VM(name)
- VM.memory = int(int(memory) * 1024 * 1024 * 1024)
- try:
- VM.update()
- setMsg("The Memory has been updated.")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to update memory.")
- setMsg(str(e))
- setFailed()
- return False
-
- def set_Memory_Policy(self, name, memory_policy):
- VM = self.get_VM(name)
- VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024
- try:
- VM.update()
- setMsg("The memory policy has been updated.")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to update memory policy.")
- setMsg(str(e))
- setFailed()
- return False
-
- def set_CPU(self, name, cpu):
- VM = self.get_VM(name)
- VM.cpu.topology.cores = int(cpu)
- try:
- VM.update()
- setMsg("The number of CPUs has been updated.")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to update the number of CPUs.")
- setMsg(str(e))
- setFailed()
- return False
-
- def set_CPU_share(self, name, cpu_share):
- VM = self.get_VM(name)
- VM.cpu_shares = int(cpu_share)
- try:
- VM.update()
- setMsg("The CPU share has been updated.")
- setChanged()
- return True
- except Exception as e:
- setMsg("Failed to update the CPU share.")
- setMsg(str(e))
- setFailed()
- return False
-
- def set_Disk(self, diskname, disksize, diskinterface, diskboot):
- DISK = self.get_disk(diskname)
- setMsg("Checking disk " + diskname)
- if DISK.get_bootable() != diskboot:
- try:
- DISK.set_bootable(diskboot)
- setMsg("Updated the boot option on the disk.")
- setChanged()
- except Exception as e:
- setMsg("Failed to set the boot option on the disk.")
- setMsg(str(e))
- setFailed()
- return False
- else:
- setMsg("The boot option of the disk is correct")
- if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
- try:
- DISK.size = (1024 * 1024 * 1024 * int(disksize))
- setMsg("Updated the size of the disk.")
- setChanged()
- except Exception as e:
- setMsg("Failed to update the size of the disk.")
- setMsg(str(e))
- setFailed()
- return False
- elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
- setMsg("Shrinking disks is not supported")
- setFailed()
- return False
- else:
- setMsg("The size of the disk is correct")
- if str(DISK.interface) != str(diskinterface):
- try:
- DISK.interface = diskinterface
- setMsg("Updated the interface of the disk.")
- setChanged()
- except Exception as e:
- setMsg("Failed to update the interface of the disk.")
- setMsg(str(e))
- setFailed()
- return False
- else:
- setMsg("The interface of the disk is correct")
- return True
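
Sizes in this module are given in GiB and converted to bytes with 1024 * 1024 * 1024, and set_Disk() only ever grows a disk because shrinking is not supported. A small sketch of that size comparison, using a hypothetical needs_resize() helper:

GIB = 1024 * 1024 * 1024  # conversion factor used throughout this module

def needs_resize(current_bytes, requested_gib):
    # True when the disk must grow; shrink requests are rejected outright.
    requested_bytes = int(requested_gib) * GIB
    if current_bytes > requested_bytes:
        raise ValueError("Shrinking disks is not supported")
    return current_bytes < requested_bytes

print(needs_resize(40 * GIB, 50))  # True: grow the 40 GiB disk to 50 GiB
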
-
- def set_NIC(self, vmname, nicname, newname, vlan, interface):
- NIC = self.get_NIC(vmname, nicname)
- VM = self.get_VM(vmname)
- CLUSTER = self.get_cluster_byid(VM.cluster.id)
- DC = self.get_DC_byid(CLUSTER.data_center.id)
- NETWORK = self.get_network(str(DC.name), vlan)
- checkFail()
- if NIC.name != newname:
- NIC.name = newname
- setMsg('Updating iface name to ' + newname)
- setChanged()
- if str(NIC.network.id) != str(NETWORK.id):
- NIC.set_network(NETWORK)
- setMsg('Updating iface network to ' + vlan)
- setChanged()
- if NIC.interface != interface:
- NIC.interface = interface
- setMsg('Updating iface interface to ' + interface)
- setChanged()
- try:
- NIC.update()
- setMsg('iface has successfully been updated.')
- except Exception as e:
- setMsg("Failed to update the iface.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def set_DeleteProtection(self, vmname, del_prot):
- VM = self.get_VM(vmname)
- VM.delete_protected = del_prot
- try:
- VM.update()
- setChanged()
- except Exception as e:
- setMsg("Failed to update delete protection.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def set_BootOrder(self, vmname, boot_order):
- VM = self.get_VM(vmname)
- bootorder = []
- for device in boot_order:
- bootorder.append(params.Boot(dev=device))
- VM.os.boot = bootorder
-
- try:
- VM.update()
- setChanged()
- except Exception as e:
- setMsg("Failed to update the boot order.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def set_Host(self, host_name, cluster, ifaces):
- HOST = self.get_Host(host_name)
- CLUSTER = self.get_cluster(cluster)
-
- if HOST is None:
- setMsg("Host does not exist.")
- ifacelist = dict()
- networklist = []
- manageip = ''
-
- try:
- for iface in ifaces:
- try:
- setMsg('creating host interface ' + iface['name'])
- if 'management' in iface:
- manageip = iface['ip']
- if 'boot_protocol' not in iface:
- if 'ip' in iface:
- iface['boot_protocol'] = 'static'
- else:
- iface['boot_protocol'] = 'none'
- if 'ip' not in iface:
- iface['ip'] = ''
- if 'netmask' not in iface:
- iface['netmask'] = ''
- if 'gateway' not in iface:
- iface['gateway'] = ''
-
- if 'network' in iface:
- if 'bond' in iface:
- bond = []
- for slave in iface['bond']:
- bond.append(ifacelist[slave])
- try:
- tmpiface = params.Bonding(
- slaves=params.Slaves(host_nic=bond),
- options=params.Options(
- option=[
- params.Option(name='miimon', value='100'),
- params.Option(name='mode', value='4')
- ]
- )
- )
- except Exception as e:
- setMsg('Failed to create the bond for ' + iface['name'])
- setFailed()
- setMsg(str(e))
- return False
- try:
- tmpnetwork = params.HostNIC(
- network=params.Network(name=iface['network']),
- name=iface['name'],
- boot_protocol=iface['boot_protocol'],
- ip=params.IP(
- address=iface['ip'],
- netmask=iface['netmask'],
- gateway=iface['gateway']
- ),
- override_configuration=True,
- bonding=tmpiface)
- networklist.append(tmpnetwork)
- setMsg('Applying network ' + iface['name'])
- except Exception as e:
- setMsg('Failed to set ' + iface['name'] + ' as network interface')
- setFailed()
- setMsg(str(e))
- return False
- else:
- tmpnetwork = params.HostNIC(
- network=params.Network(name=iface['network']),
- name=iface['name'],
- boot_protocol=iface['boot_protocol'],
- ip=params.IP(
- address=iface['ip'],
- netmask=iface['netmask'],
- gateway=iface['gateway']
- ))
- networklist.append(tmpnetwork)
- setMsg('Applying network ' + iface['name'])
- else:
- tmpiface = params.HostNIC(
- name=iface['name'],
- network=params.Network(),
- boot_protocol=iface['boot_protocol'],
- ip=params.IP(
- address=iface['ip'],
- netmask=iface['netmask'],
- gateway=iface['gateway']
- ))
- ifacelist[iface['name']] = tmpiface
- except Exception as e:
- setMsg('Failed to set ' + iface['name'])
- setFailed()
- setMsg(str(e))
- return False
- except Exception as e:
- setMsg('Failed to set networks')
- setMsg(str(e))
- setFailed()
- return False
-
- if manageip == '':
- setMsg('No management network is defined')
- setFailed()
- return False
-
- try:
- HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
- if self.conn.hosts.add(HOST):
- setChanged()
- HOST = self.get_Host(host_name)
- state = HOST.status.state
- while state not in ('non_operational', 'up'):
- HOST = self.get_Host(host_name)
- state = HOST.status.state
- time.sleep(1)
- if state == 'non_responsive':
- setMsg('Failed to add host to RHEVM')
- setFailed()
- return False
-
- setMsg('status host: up')
- time.sleep(5)
-
- HOST = self.get_Host(host_name)
- state = HOST.status.state
- setMsg('State before setting to maintenance: ' + str(state))
- HOST.deactivate()
- while state != 'maintenance':
- HOST = self.get_Host(host_name)
- state = HOST.status.state
- time.sleep(1)
- setMsg('status host: maintenance')
-
- try:
- HOST.nics.setupnetworks(params.Action(
- force=True,
- check_connectivity=False,
- host_nics=params.HostNics(host_nic=networklist)
- ))
- setMsg('nics are set')
- except Exception as e:
- setMsg('Failed to apply networkconfig')
- setFailed()
- setMsg(str(e))
- return False
-
- try:
- HOST.commitnetconfig()
- setMsg('Network config is saved')
- except Exception as e:
- setMsg('Failed to save networkconfig')
- setFailed()
- setMsg(str(e))
- return False
- except Exception as e:
- if 'The Host name is already in use' in str(e):
- setMsg("Host already exists")
- else:
- setMsg("Failed to add host")
- setFailed()
- setMsg(str(e))
- return False
-
- HOST.activate()
- while state != 'up':
- HOST = self.get_Host(host_name)
- state = HOST.status.state
- time.sleep(1)
- if state == 'non_responsive':
- setMsg('Failed to apply networkconfig.')
- setFailed()
- return False
- setMsg('status host: up')
- else:
- setMsg("Host exists.")
-
- return True
-
- def del_NIC(self, vmname, nicname):
- return self.get_NIC(vmname, nicname).delete()
-
- def remove_VM(self, vmname):
- VM = self.get_VM(vmname)
- try:
- VM.delete()
- except Exception as e:
- setMsg("Failed to remove VM.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def start_VM(self, vmname, timeout):
- VM = self.get_VM(vmname)
- try:
- VM.start()
- except Exception as e:
- setMsg("Failed to start VM.")
- setMsg(str(e))
- setFailed()
- return False
- return self.wait_VM(vmname, "up", timeout)
-
- def wait_VM(self, vmname, state, timeout):
- VM = self.get_VM(vmname)
- while VM.status.state != state:
- VM = self.get_VM(vmname)
- time.sleep(10)
- if timeout:  # only count down when a numeric timeout was supplied
- timeout -= 10
- if timeout <= 0:
- setMsg("Timeout expired")
- setFailed()
- return False
- return True
-
- def stop_VM(self, vmname, timeout):
- VM = self.get_VM(vmname)
- try:
- VM.stop()
- except Exception as e:
- setMsg("Failed to stop VM.")
- setMsg(str(e))
- setFailed()
- return False
- return self.wait_VM(vmname, "down", timeout)
-
- def set_CD(self, vmname, cd_drive):
- VM = self.get_VM(vmname)
- try:
- if str(VM.status.state) == 'down':
- cdrom = params.CdRom(file=cd_drive)
- VM.cdroms.add(cdrom)
- setMsg("Attached the image.")
- setChanged()
- else:
- cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
- cdrom.set_file(cd_drive)
- cdrom.update(current=True)
- setMsg("Attached the image.")
- setChanged()
- except Exception as e:
- setMsg("Failed to attach image.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def set_VM_Host(self, vmname, vmhost):
- VM = self.get_VM(vmname)
- HOST = self.get_Host(vmhost)
- try:
- VM.placement_policy.host = HOST
- VM.update()
- setMsg("Set startup host to " + vmhost)
- setChanged()
- except Exception as e:
- setMsg("Failed to set startup host.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def migrate_VM(self, vmname, vmhost):
- VM = self.get_VM(vmname)
-
- HOST = self.get_Host_byid(VM.host.id)
- if str(HOST.name) != vmhost:
- try:
- VM.migrate(
- action=params.Action(
- host=params.Host(
- name=vmhost,
- )
- ),
- )
- setChanged()
- setMsg("VM migrated to " + vmhost)
- except Exception as e:
- setMsg("Failed to set startup host.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
- def remove_CD(self, vmname):
- VM = self.get_VM(vmname)
- try:
- VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
- setMsg("Removed the image.")
- setChanged()
- except Exception as e:
- setMsg("Failed to remove the image.")
- setMsg(str(e))
- setFailed()
- return False
- return True
-
-
-class RHEV(object):
- def __init__(self, module):
- self.module = module
-
- def __get_conn(self):
- self.conn = RHEVConn(self.module)
- return self.conn
-
- def test(self):
- self.__get_conn()
- return "OK"
-
- def getVM(self, name):
- self.__get_conn()
- VM = self.conn.get_VM(name)
- if VM:
- vminfo = dict()
- vminfo['uuid'] = VM.id
- vminfo['name'] = VM.name
- vminfo['status'] = VM.status.state
- vminfo['cpu_cores'] = VM.cpu.topology.cores
- vminfo['cpu_sockets'] = VM.cpu.topology.sockets
- vminfo['cpu_shares'] = VM.cpu_shares
- vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
- vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
- vminfo['os'] = VM.get_os().type_
- vminfo['del_prot'] = VM.delete_protected
- try:
- vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
- except Exception:
- vminfo['host'] = None
- vminfo['boot_order'] = []
- for boot_dev in VM.os.get_boot():
- vminfo['boot_order'].append(str(boot_dev.dev))
- vminfo['disks'] = []
- for DISK in VM.disks.list():
- disk = dict()
- disk['name'] = DISK.name
- disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
- disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
- disk['interface'] = DISK.interface
- vminfo['disks'].append(disk)
- vminfo['ifaces'] = []
- for NIC in VM.nics.list():
- iface = dict()
- iface['name'] = str(NIC.name)
- iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
- iface['interface'] = NIC.interface
- iface['mac'] = NIC.mac.address
- vminfo['ifaces'].append(iface)
- vminfo[str(NIC.name)] = NIC.mac.address
- CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
- if CLUSTER:
- vminfo['cluster'] = CLUSTER.name
- else:
- vminfo = False
- return vminfo
-
- def createVMimage(self, name, cluster, template, disks):
- self.__get_conn()
- # RHEVConn.createVMimage() takes no disks argument; the template's disks
- # are cloned there and any extra disks are applied later via setDisks().
- return self.conn.createVMimage(name, cluster, template)
-
- def createVM(self, name, cluster, os, actiontype):
- self.__get_conn()
- return self.conn.createVM(name, cluster, os, actiontype)
-
- def setMemory(self, name, memory):
- self.__get_conn()
- return self.conn.set_Memory(name, memory)
-
- def setMemoryPolicy(self, name, memory_policy):
- self.__get_conn()
- return self.conn.set_Memory_Policy(name, memory_policy)
-
- def setCPU(self, name, cpu):
- self.__get_conn()
- return self.conn.set_CPU(name, cpu)
-
- def setCPUShare(self, name, cpu_share):
- self.__get_conn()
- return self.conn.set_CPU_share(name, cpu_share)
-
- def setDisks(self, name, disks):
- self.__get_conn()
- counter = 0
- bootselect = False
- for disk in disks:
- if 'bootable' in disk:
- if disk['bootable'] is True:
- bootselect = True
-
- for disk in disks:
- diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
- disksize = disk.get('size', 1)
- diskdomain = disk.get('domain', None)
- if diskdomain is None:
- setMsg("`domain` is a required disk key.")
- setFailed()
- return False
- diskinterface = disk.get('interface', 'virtio')
- diskformat = disk.get('format', 'raw')
- diskallocationtype = disk.get('thin', False)
- diskboot = disk.get('bootable', False)
-
- if bootselect is False and counter == 0:
- diskboot = True
-
- DISK = self.conn.get_disk(diskname)
-
- if DISK is None:
- self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
- else:
- self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
- checkFail()
- counter += 1
-
- return True
-
- def setNetworks(self, vmname, ifaces):
- self.__get_conn()
- VM = self.conn.get_VM(vmname)
-
- counter = 0
- length = len(ifaces)
-
- for NIC in VM.nics.list():
- if counter < length:
- iface = ifaces[counter]
- name = iface.get('name', None)
- if name is None:
- setMsg("`name` is a required iface key.")
- setFailed()
- elif str(name) != str(NIC.name):
- setMsg("ifaces are in the wrong order, rebuilding everything.")
- for NIC in VM.nics.list():
- self.conn.del_NIC(vmname, NIC.name)
- self.setNetworks(vmname, ifaces)
- checkFail()
- return True
- vlan = iface.get('vlan', None)
- if vlan is None:
- setMsg("`vlan` is a required iface key.")
- setFailed()
- checkFail()
- interface = iface.get('interface', 'virtio')
- self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
- else:
- self.conn.del_NIC(vmname, NIC.name)
- counter += 1
- checkFail()
-
- while counter < length:
- iface = ifaces[counter]
- name = iface.get('name', None)
- if name is None:
- setMsg("`name` is a required iface key.")
- setFailed()
- vlan = iface.get('vlan', None)
- if vlan is None:
- setMsg("`vlan` is a required iface key.")
- setFailed()
- if failed is True:
- return False
- interface = iface.get('interface', 'virtio')
- self.conn.createNIC(vmname, name, vlan, interface)
-
- counter += 1
- checkFail()
- return True
-
- def setDeleteProtection(self, vmname, del_prot):
- self.__get_conn()
- VM = self.conn.get_VM(vmname)
- if bool(VM.delete_protected) != bool(del_prot):
- self.conn.set_DeleteProtection(vmname, del_prot)
- checkFail()
- setMsg("`delete protection` has been updated.")
- else:
- setMsg("`delete protection` already has the right value.")
- return True
-
- def setBootOrder(self, vmname, boot_order):
- self.__get_conn()
- VM = self.conn.get_VM(vmname)
- bootorder = []
- for boot_dev in VM.os.get_boot():
- bootorder.append(str(boot_dev.dev))
-
- if boot_order != bootorder:
- self.conn.set_BootOrder(vmname, boot_order)
- setMsg('The boot order has been set')
- else:
- setMsg('The boot order has already been set')
- return True
-
- def removeVM(self, vmname):
- self.__get_conn()
- self.setPower(vmname, "down", 300)
- return self.conn.remove_VM(vmname)
-
- def setPower(self, vmname, state, timeout):
- self.__get_conn()
- VM = self.conn.get_VM(vmname)
- if VM is None:
- setMsg("VM does not exist.")
- setFailed()
- return False
-
- if state == VM.status.state:
- setMsg("VM state was already " + state)
- else:
- if state == "up":
- setMsg("VM is going to start")
- self.conn.start_VM(vmname, timeout)
- setChanged()
- elif state == "down":
- setMsg("VM is going to stop")
- self.conn.stop_VM(vmname, timeout)
- setChanged()
- elif state == "restarted":
- self.setPower(vmname, "down", timeout)
- checkFail()
- self.setPower(vmname, "up", timeout)
- checkFail()
- setMsg("the vm state is set to " + state)
- return True
-
- def setCD(self, vmname, cd_drive):
- self.__get_conn()
- if cd_drive:
- return self.conn.set_CD(vmname, cd_drive)
- else:
- return self.conn.remove_CD(vmname)
-
- def setVMHost(self, vmname, vmhost):
- self.__get_conn()
- return self.conn.set_VM_Host(vmname, vmhost)
-
- def setHost(self, hostname, cluster, ifaces):
- self.__get_conn()
- return self.conn.set_Host(hostname, cluster, ifaces)
-
-
-def checkFail():
- if failed:
- module.fail_json(msg=msg)
- else:
- return True
-
-
-def setFailed():
- global failed
- failed = True
-
-
-def setChanged():
- global changed
- changed = True
-
-
-def setMsg(message):
- msg.append(message)
-
-
-def core(module):
-
- r = RHEV(module)
-
- state = module.params.get('state')
-
- if state == 'ping':
- r.test()
- return RHEV_SUCCESS, {"ping": "pong"}
- elif state == 'info':
- name = module.params.get('name')
- if not name:
- setMsg("`name` is a required argument.")
- return RHEV_FAILED, msg
- vminfo = r.getVM(name)
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
- elif state == 'present':
- created = False
- name = module.params.get('name')
- if not name:
- setMsg("`name` is a required argument.")
- return RHEV_FAILED, msg
- actiontype = module.params.get('type')
- if actiontype == 'server' or actiontype == 'desktop':
- vminfo = r.getVM(name)
- if vminfo:
- setMsg('VM exists')
- else:
- # Create VM
- cluster = module.params.get('cluster')
- if cluster is None:
- setMsg("cluster is a required argument.")
- setFailed()
- template = module.params.get('image')
- if template:
- disks = module.params.get('disks')
- if disks is None:
- setMsg("disks is a required argument.")
- setFailed()
- checkFail()
- if r.createVMimage(name, cluster, template, disks) is False:
- return RHEV_FAILED, vminfo
- else:
- os = module.params.get('osver')
- if os is None:
- setMsg("osver is a required argument.")
- setFailed()
- checkFail()
- if r.createVM(name, cluster, os, actiontype) is False:
- return RHEV_FAILED, vminfo
- created = True
-
- # Set MEMORY and MEMORY POLICY
- vminfo = r.getVM(name)
- memory = module.params.get('vmmem')
- if memory is not None:
- memory_policy = module.params.get('mempol')
- if memory_policy == 0:
- memory_policy = memory
- mem_pol_nok = True
- if int(vminfo['mem_pol']) == memory_policy:
- setMsg("Memory is correct")
- mem_pol_nok = False
-
- mem_nok = True
- if int(vminfo['memory']) == memory:
- setMsg("Memory is correct")
- mem_nok = False
-
- if memory_policy > memory:
- setMsg('memory_policy cannot have a higher value than memory.')
- return RHEV_FAILED, msg
-
- if mem_nok and mem_pol_nok:
- if memory_policy > int(vminfo['memory']):
- r.setMemory(vminfo['name'], memory)
- r.setMemoryPolicy(vminfo['name'], memory_policy)
- else:
- r.setMemoryPolicy(vminfo['name'], memory_policy)
- r.setMemory(vminfo['name'], memory)
- elif mem_nok:
- r.setMemory(vminfo['name'], memory)
- elif mem_pol_nok:
- r.setMemoryPolicy(vminfo['name'], memory_policy)
- checkFail()
-
- # Set CPU
- cpu = module.params.get('vmcpu')
- if int(vminfo['cpu_cores']) == cpu:
- setMsg("Number of CPUs is correct")
- else:
- if r.setCPU(vminfo['name'], cpu) is False:
- return RHEV_FAILED, msg
-
- # Set CPU SHARE
- cpu_share = module.params.get('cpu_share')
- if cpu_share is not None:
- if int(vminfo['cpu_shares']) == cpu_share:
- setMsg("CPU share is correct.")
- else:
- if r.setCPUShare(vminfo['name'], cpu_share) is False:
- return RHEV_FAILED, msg
-
- # Set DISKS
- disks = module.params.get('disks')
- if disks is not None:
- if r.setDisks(vminfo['name'], disks) is False:
- return RHEV_FAILED, msg
-
- # Set NETWORKS
- ifaces = module.params.get('ifaces', None)
- if ifaces is not None:
- if r.setNetworks(vminfo['name'], ifaces) is False:
- return RHEV_FAILED, msg
-
- # Set Delete Protection
- del_prot = module.params.get('del_prot')
- if r.setDeleteProtection(vminfo['name'], del_prot) is False:
- return RHEV_FAILED, msg
-
- # Set Boot Order
- boot_order = module.params.get('boot_order')
- if r.setBootOrder(vminfo['name'], boot_order) is False:
- return RHEV_FAILED, msg
-
- # Set VM Host
- vmhost = module.params.get('vmhost')
- if vmhost:
- if r.setVMHost(vminfo['name'], vmhost) is False:
- return RHEV_FAILED, msg
-
- vminfo = r.getVM(name)
- vminfo['created'] = created
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
-
- if actiontype == 'host':
- cluster = module.params.get('cluster')
- if cluster is None:
- setMsg("cluster is a required argument.")
- setFailed()
- ifaces = module.params.get('ifaces')
- if ifaces is None:
- setMsg("ifaces is a required argument.")
- setFailed()
- if r.setHost(name, cluster, ifaces) is False:
- return RHEV_FAILED, msg
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
-
- elif state == 'absent':
- name = module.params.get('name')
- if not name:
- setMsg("`name` is a required argument.")
- return RHEV_FAILED, msg
- actiontype = module.params.get('type')
- if actiontype == 'server' or actiontype == 'desktop':
- vminfo = r.getVM(name)
- if vminfo:
- setMsg('VM exists')
-
- # Set Delete Protection
- del_prot = module.params.get('del_prot')
- if r.setDeleteProtection(vminfo['name'], del_prot) is False:
- return RHEV_FAILED, msg
-
- # Remove VM
- if r.removeVM(vminfo['name']) is False:
- return RHEV_FAILED, msg
- setMsg('VM has been removed.')
- vminfo['state'] = 'DELETED'
- else:
- setMsg('VM was already removed.')
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
-
- elif state == 'up' or state == 'down' or state == 'restarted':
- name = module.params.get('name')
- if not name:
- setMsg("`name` is a required argument.")
- return RHEV_FAILED, msg
- timeout = module.params.get('timeout')
- if r.setPower(name, state, timeout) is False:
- return RHEV_FAILED, msg
- vminfo = r.getVM(name)
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
-
- elif state == 'cd':
- name = module.params.get('name')
- cd_drive = module.params.get('cd_drive')
- if r.setCD(name, cd_drive) is False:
- return RHEV_FAILED, msg
- return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
- user=dict(type='str', default='admin@internal'),
- password=dict(type='str', required=True, no_log=True),
- server=dict(type='str', default='127.0.0.1'),
- port=dict(type='int', default=443),
- insecure_api=dict(type='bool', default=False),
- name=dict(type='str'),
- image=dict(type='str'),
- datacenter=dict(type='str', default="Default"),
- type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
- cluster=dict(type='str', default=''),
- vmhost=dict(type='str'),
- vmcpu=dict(type='int', default=2),
- vmmem=dict(type='int', default=1),
- disks=dict(type='list', elements='str'),
- osver=dict(type='str', default="rhel_6x64"),
- ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
- timeout=dict(type='int'),
- mempol=dict(type='int', default=1),
- vm_ha=dict(type='bool', default=True),
- cpu_share=dict(type='int', default=0),
- boot_order=dict(type='list', elements='str', default=['hd', 'network']),
- del_prot=dict(type='bool', default=True),
- cd_drive=dict(type='str'),
- ),
- )
-
- if not HAS_SDK:
- module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
-
- rc = RHEV_SUCCESS
- try:
- rc, result = core(module)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- if rc != 0: # something went wrong emit the msg
- module.fail_json(rc=rc, msg=result)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py b/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
deleted file mode 100644
index 878621c3..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/serverless.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Ryan Scott Brown
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: serverless
-short_description: Manages a Serverless Framework project
-description:
- - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
-options:
- state:
- description:
- - Goal state of given stage/project.
- type: str
- choices: [ absent, present ]
- default: present
- serverless_bin_path:
- description:
- - The path of a serverless framework binary relative to the 'service_path', e.g. node_modules/.bin/serverless.
- type: path
- service_path:
- description:
- - The path to the root of the Serverless Service to be operated on.
- type: path
- required: true
- stage:
- description:
- - The name of the serverless framework project stage to deploy to.
- - If not provided, the serverless framework default of "dev" is used.
- type: str
- functions:
- description:
- - A list of specific functions to deploy.
- - If this is not provided, all functions in the service will be deployed.
- - Deprecated parameter, it will be removed in community.general 5.0.0.
- type: list
- elements: str
- default: []
- region:
- description:
- - AWS region to deploy the service to.
- - This parameter defaults to C(us-east-1).
- type: str
- deploy:
- description:
- - Whether or not to deploy artifacts after building them.
- - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
- - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
- type: bool
- default: yes
- force:
- description:
- - Whether or not to force full deployment, equivalent to serverless C(--force) option.
- type: bool
- default: no
- verbose:
- description:
- - Shows all stack events during deployment, and display any Stack Output.
- type: bool
- default: no
-notes:
- - Currently, the C(serverless) command must be in the path of the node executing the task.
- In the future this may be a flag.
-requirements:
-- serverless
-- yaml
-author:
-- Ryan Scott Brown (@ryansb)
-'''
-
-EXAMPLES = r'''
-- name: Basic deploy of a service
- community.general.serverless:
- service_path: '{{ project_dir }}'
- state: present
-
-- name: Deploy a project, then pull its resource list back into Ansible
- community.general.serverless:
- stage: dev
- region: us-east-1
- service_path: '{{ project_dir }}'
- register: sls
-
-# The cloudformation stack is always named the same as the full service, so the
-# cloudformation_info module can get a full list of the stack resources, as
-# well as stack events and outputs
-- cloudformation_info:
- region: us-east-1
- stack_name: '{{ sls.service_name }}'
- stack_resources: true
-
-- name: Deploy a project using a locally installed serverless binary
- community.general.serverless:
- stage: dev
- region: us-east-1
- service_path: '{{ project_dir }}'
- serverless_bin_path: node_modules/.bin/serverless
-'''
-
-RETURN = r'''
-service_name:
- type: str
- description: The service name specified in the serverless.yml that was just deployed.
- returned: always
- sample: my-fancy-service-dev
-state:
- type: str
- description: Whether the stack for the serverless project is present/absent.
- returned: always
-command:
- type: str
- description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
- returned: always
- sample: serverless deploy --stage production
-'''
-
-import os
-
-try:
- import yaml
- HAS_YAML = True
-except ImportError:
- HAS_YAML = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def read_serverless_config(module):
- path = module.params.get('service_path')
- full_path = os.path.join(path, 'serverless.yml')
-
- try:
- with open(full_path) as sls_config:
- config = yaml.safe_load(sls_config.read())
- return config
- except IOError as e:
- module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(full_path, str(e)))
-
-
-def get_service_name(module, stage):
- config = read_serverless_config(module)
- if config.get('service') is None:
- module.fail_json(msg="Could not read `service` key from serverless.yml file")
-
- if stage:
- return "{0}-{1}".format(config['service'], stage)
-
- return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- service_path=dict(type='path', required=True),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- functions=dict(type='list', elements='str',
- removed_in_version="5.0.0", removed_from_collection="community.general"),
- region=dict(type='str', default=''),
- stage=dict(type='str', default=''),
- deploy=dict(type='bool', default=True),
- serverless_bin_path=dict(type='path'),
- force=dict(type='bool', default=False),
- verbose=dict(type='bool', default=False),
- ),
- )
-
- if not HAS_YAML:
- module.fail_json(msg='yaml is required for this module')
-
- service_path = module.params.get('service_path')
- state = module.params.get('state')
- region = module.params.get('region')
- stage = module.params.get('stage')
- deploy = module.params.get('deploy', True)
- force = module.params.get('force', False)
- verbose = module.params.get('verbose', False)
- serverless_bin_path = module.params.get('serverless_bin_path')
-
- if serverless_bin_path is not None:
- command = serverless_bin_path + " "
- else:
- command = module.get_bin_path("serverless") + " "
-
- if state == 'present':
- command += 'deploy '
- elif state == 'absent':
- command += 'remove '
- else:
- module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
-
- if state == 'present':
- if not deploy:
- command += '--noDeploy '
- elif force:
- command += '--force '
-
- if region:
- command += '--region {0} '.format(region)
- if stage:
- command += '--stage {0} '.format(stage)
- if verbose:
- command += '--verbose '
-
- rc, out, err = module.run_command(command, cwd=service_path)
- if rc != 0:
- if state == 'absent' and "-{0}' does not exist".format(stage) in out:
- module.exit_json(changed=False, state='absent', command=command,
- out=out, service_name=get_service_name(module, stage))
-
- module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
-
- # gather some facts about the deployment
- module.exit_json(changed=True, state='present', out=out, command=command,
- service_name=get_service_name(module, stage))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py b/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
deleted file mode 100644
index 8eca14e7..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/terraform.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Ryan Scott Brown
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: terraform
-short_description: Manages a Terraform deployment (and plans)
-description:
- - Provides support for deploying resources with Terraform and pulling
- resource information back into Ansible.
-options:
- state:
- choices: ['planned', 'present', 'absent']
- description:
- - Goal state of given stage/project
- type: str
- default: present
- binary_path:
- description:
- - The path of a terraform binary to use, relative to the 'service_path'
- unless you supply an absolute path.
- type: path
- project_path:
- description:
- - The path to the root of the Terraform directory with the
- vars.tf/main.tf/etc to use.
- type: path
- required: true
- plugin_paths:
- description:
- - List of paths containing Terraform plugin executable files.
- - Plugin executables can be downloaded from U(https://releases.hashicorp.com/).
- - When set, the plugin discovery and auto-download behavior of Terraform is disabled.
- - The directory structure in the plugin path can be tricky. The Terraform docs
- U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins)
- show a simple directory of files, but actually, the directory structure
- has to follow the same structure you would see if Terraform auto-downloaded the plugins.
- See the examples below for a tree output of an example plugin directory.
- type: list
- elements: path
- version_added: 3.0.0
- workspace:
- description:
- - The terraform workspace to work with.
- type: str
- default: default
- purge_workspace:
- description:
- - Only works with I(state=absent).
- - If true, the workspace will be deleted after the "terraform destroy" action.
- - The 'default' workspace will not be deleted.
- default: false
- type: bool
- plan_file:
- description:
- - The path to an existing Terraform plan file to apply. If this is not
- specified, Ansible will build a new TF plan and execute it.
- Note that this option is required if 'state' has the 'planned' value.
- type: path
- state_file:
- description:
- - The path to an existing Terraform state file to use when building plan.
- If this is not specified, the default `terraform.tfstate` will be used.
- - This option is ignored when plan is specified.
- type: path
- variables_files:
- description:
- - The path to a variables file for Terraform to fill into the TF
- configurations. This can accept a list of paths to multiple variables files.
- - Up until Ansible 2.9, this option was usable as I(variables_file).
- type: list
- elements: path
- aliases: [ 'variables_file' ]
- variables:
- description:
- - A group of key-values to override template variables or those in
- variables files.
- type: dict
- targets:
- description:
- - A list of specific resources to target in this plan/application. The
- resources selected here will also auto-include any dependencies.
- type: list
- elements: str
- lock:
- description:
- - Enable statefile locking, if you use a service that accepts locks (such
- as S3+DynamoDB) to store your statefile.
- type: bool
- default: true
- lock_timeout:
- description:
- - How long to maintain the lock on the statefile, if you use a service
- that accepts locks (such as S3+DynamoDB).
- type: int
- force_init:
- description:
- - To avoid duplicating infra, if a state file can't be found this will
- force a `terraform init`. Generally, this should be turned off unless
- you intend to provision an entirely new Terraform deployment.
- default: false
- type: bool
- overwrite_init:
- description:
- - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path).
- default: true
- type: bool
- version_added: '3.2.0'
- backend_config:
- description:
- - A group of key-values to provide at init stage to the -backend-config parameter.
- type: dict
- backend_config_files:
- description:
- - The path to a configuration file to provide at init state to the -backend-config parameter.
- This can accept a list of paths to multiple configuration files.
- type: list
- elements: path
- version_added: '0.2.0'
- init_reconfigure:
- description:
- - Forces backend reconfiguration during init.
- default: false
- type: bool
- version_added: '1.3.0'
- check_destroy:
- description:
- - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions,
- but not "destroy and re-create" actions. This option is ignored when I(state=absent).
- type: bool
- default: false
- version_added: '3.3.0'
- parallelism:
- description:
- - Restrict concurrent operations when Terraform applies the plan.
- type: int
- version_added: '3.8.0'
-notes:
- - To just run a `terraform plan`, use check mode.
-requirements: [ "terraform" ]
-author: "Ryan Scott Brown (@ryansb)"
-'''
-
-EXAMPLES = """
-- name: Basic deploy of a service
- community.general.terraform:
- project_path: '{{ project_dir }}'
- state: present
-
-- name: Define the backend configuration at init
- community.general.terraform:
- project_path: 'project/'
- state: "{{ state }}"
- force_init: true
- backend_config:
- region: "eu-west-1"
- bucket: "some-bucket"
- key: "random.tfstate"
-
-- name: Define the backend configuration with one or more files at init
- community.general.terraform:
- project_path: 'project/'
- state: "{{ state }}"
- force_init: true
- backend_config_files:
- - /path/to/backend_config_file_1
- - /path/to/backend_config_file_2
-
-- name: Disable plugin discovery and auto-download by setting plugin_paths
- community.general.terraform:
- project_path: 'project/'
- state: "{{ state }}"
- force_init: true
- plugin_paths:
- - /path/to/plugins_dir_1
- - /path/to/plugins_dir_2
-
-### Example directory structure for plugin_paths example
-# $ tree /path/to/plugins_dir_1
-# /path/to/plugins_dir_1/
-# └── registry.terraform.io
-# └── hashicorp
-# └── vsphere
-# ├── 1.24.0
-# │ └── linux_amd64
-# │ └── terraform-provider-vsphere_v1.24.0_x4
-# └── 1.26.0
-# └── linux_amd64
-# └── terraform-provider-vsphere_v1.26.0_x4
-"""
-
-RETURN = """
-outputs:
- type: complex
- description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
- returned: on success
- sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
- contains:
- sensitive:
- type: bool
- returned: always
- description: Whether Terraform has marked this value as sensitive
- type:
- type: str
- returned: always
- description: The type of the value (string, int, etc.).
- value:
- type: str
- returned: always
- description: The value of the output as interpolated by Terraform
-stdout:
- type: str
- description: Full `terraform` command stdout, in case you want to display it or examine the event log
- returned: always
- sample: ''
-command:
- type: str
- description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
- returned: always
- sample: terraform apply ...
-"""
-
-import os
-import json
-import tempfile
-from ansible.module_utils.six.moves import shlex_quote
-
-from ansible.module_utils.basic import AnsibleModule
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-module = None
-
-
-def get_version(bin_path):
- extract_version = module.run_command([bin_path, 'version', '-json'])
- terraform_version = (json.loads(extract_version[1]))['terraform_version']
- return terraform_version
-
-
-def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None):
- if project_path is None or '/' not in project_path:
- module.fail_json(msg="Path for Terraform project can not be None or ''.")
- if not os.path.exists(bin_path):
- module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
- if not os.path.isdir(project_path):
- module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
- if LooseVersion(version) < LooseVersion('0.15.0'):
- rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, check_rc=True, cwd=project_path)
- else:
- rc, out, err = module.run_command([bin_path, 'validate'], check_rc=True, cwd=project_path)
-
-
-def _state_args(state_file):
- if state_file and os.path.exists(state_file):
- return ['-state', state_file]
- if state_file and not os.path.exists(state_file):
- module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
- return []
-
-
-def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths):
- command = [bin_path, 'init', '-input=false']
- if backend_config:
- for key, val in backend_config.items():
- command.extend([
- '-backend-config',
- shlex_quote('{0}={1}'.format(key, val))
- ])
- if backend_config_files:
- for f in backend_config_files:
- command.extend(['-backend-config', f])
- if init_reconfigure:
- command.extend(['-reconfigure'])
- if plugin_paths:
- for plugin_path in plugin_paths:
- command.extend(['-plugin-dir', plugin_path])
- rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
-
-
-def get_workspace_context(bin_path, project_path):
- workspace_ctx = {"current": "default", "all": []}
- command = [bin_path, 'workspace', 'list', '-no-color']
- rc, out, err = module.run_command(command, cwd=project_path)
- if rc != 0:
- module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
- for item in out.split('\n'):
- stripped_item = item.strip()
- if not stripped_item:
- continue
- elif stripped_item.startswith('* '):
- workspace_ctx["current"] = stripped_item.replace('* ', '')
- else:
- workspace_ctx["all"].append(stripped_item)
- return workspace_ctx
-
-
-def _workspace_cmd(bin_path, project_path, action, workspace):
- command = [bin_path, 'workspace', action, workspace, '-no-color']
- rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
- return rc, out, err
-
-
-def create_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'new', workspace)
-
-
-def select_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'select', workspace)
-
-
-def remove_workspace(bin_path, project_path, workspace):
- _workspace_cmd(bin_path, project_path, 'delete', workspace)
-
-
-def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None):
- if plan_path is None:
- f, plan_path = tempfile.mkstemp(suffix='.tfplan')
-
- local_command = command.copy()
-
- plan_command = [command[0], 'plan']
-
- if state == "planned":
- for c in local_command[1:]:
- plan_command.append(c)
-
- if state == "present":
- for a in apply_args:
- local_command.remove(a)
- for c in local_command[1:]:
- plan_command.append(c)
-
- plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
-
- for t in targets:
- plan_command.extend(['-target', t])
-
- plan_command.extend(_state_args(state_file))
-
- rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path)
-
- if rc == 0:
- # no changes
- return plan_path, False, out, err, plan_command if state == 'planned' else command
- elif rc == 1:
- # failure to plan
- module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
- elif rc == 2:
- # changes, but successful
- return plan_path, True, out, err, plan_command if state == 'planned' else command
-
- module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
-
-
-def main():
- global module
- module = AnsibleModule(
- argument_spec=dict(
- project_path=dict(required=True, type='path'),
- binary_path=dict(type='path'),
- plugin_paths=dict(type='list', elements='path'),
- workspace=dict(type='str', default='default'),
- purge_workspace=dict(type='bool', default=False),
- state=dict(default='present', choices=['present', 'absent', 'planned']),
- variables=dict(type='dict'),
- variables_files=dict(aliases=['variables_file'], type='list', elements='path'),
- plan_file=dict(type='path'),
- state_file=dict(type='path'),
- targets=dict(type='list', elements='str', default=[]),
- lock=dict(type='bool', default=True),
- lock_timeout=dict(type='int',),
- force_init=dict(type='bool', default=False),
- backend_config=dict(type='dict'),
- backend_config_files=dict(type='list', elements='path'),
- init_reconfigure=dict(type='bool', default=False),
- overwrite_init=dict(type='bool', default=True),
- check_destroy=dict(type='bool', default=False),
- parallelism=dict(type='int'),
- ),
- required_if=[('state', 'planned', ['plan_file'])],
- supports_check_mode=True,
- )
-
- project_path = module.params.get('project_path')
- bin_path = module.params.get('binary_path')
- plugin_paths = module.params.get('plugin_paths')
- workspace = module.params.get('workspace')
- purge_workspace = module.params.get('purge_workspace')
- state = module.params.get('state')
- variables = module.params.get('variables') or {}
- variables_files = module.params.get('variables_files')
- plan_file = module.params.get('plan_file')
- state_file = module.params.get('state_file')
- force_init = module.params.get('force_init')
- backend_config = module.params.get('backend_config')
- backend_config_files = module.params.get('backend_config_files')
- init_reconfigure = module.params.get('init_reconfigure')
- overwrite_init = module.params.get('overwrite_init')
- check_destroy = module.params.get('check_destroy')
-
- if bin_path is not None:
- command = [bin_path]
- else:
- command = [module.get_bin_path('terraform', required=True)]
-
- checked_version = get_version(command[0])
-
- if LooseVersion(checked_version) < LooseVersion('0.15.0'):
- DESTROY_ARGS = ('destroy', '-no-color', '-force')
- APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
- else:
- DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
- APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')
-
- if force_init:
- if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")):
- init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, plugin_paths)
-
- workspace_ctx = get_workspace_context(command[0], project_path)
- if workspace_ctx["current"] != workspace:
- if workspace not in workspace_ctx["all"]:
- create_workspace(command[0], project_path, workspace)
- else:
- select_workspace(command[0], project_path, workspace)
-
- if state == 'present':
- command.extend(APPLY_ARGS)
- elif state == 'absent':
- command.extend(DESTROY_ARGS)
-
- if state == 'present' and module.params.get('parallelism') is not None:
- command.append('-parallelism=%d' % module.params.get('parallelism'))
-
- variables_args = []
- for k, v in variables.items():
- variables_args.extend([
- '-var',
- '{0}={1}'.format(k, v)
- ])
- if variables_files:
- for f in variables_files:
- variables_args.extend(['-var-file', f])
-
- preflight_validation(command[0], project_path, checked_version, variables_args)
-
- if module.params.get('lock') is not None:
- if module.params.get('lock'):
- command.append('-lock=true')
- else:
- command.append('-lock=false')
- if module.params.get('lock_timeout') is not None:
- command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
-
- for t in (module.params.get('targets') or []):
- command.extend(['-target', t])
-
- # we aren't sure if this plan will result in changes, so assume yes
- needs_application, changed = True, False
-
- out, err = '', ''
-
- if state == 'absent':
- command.extend(variables_args)
- elif state == 'present' and plan_file:
- if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
- command.append(plan_file)
- else:
- module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
- else:
- plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
- module.params.get('targets'), state, APPLY_ARGS, plan_file)
- if state == 'present' and check_destroy and '- destroy' in out:
- module.fail_json(msg="Aborting command because it would destroy some resources. "
- "Consider switching the 'check_destroy' to false to suppress this error")
- command.append(plan_file)
-
- if needs_application and not module.check_mode and state != 'planned':
- rc, out, err = module.run_command(command, check_rc=False, cwd=project_path)
- if rc != 0:
- if workspace_ctx["current"] != workspace:
- select_workspace(command[0], project_path, workspace_ctx["current"])
- module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
- stdout_lines=out.splitlines(), stderr=err,
- stderr_lines=err.splitlines(),
- cmd=' '.join(command))
- # inspect the terraform output to decide whether any changes were made
- if (' 0 added, 0 changed' not in out and state != "absent") or ' 0 destroyed' not in out:
- changed = True
-
- outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
- rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
- if rc == 1:
- module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
- outputs = {}
- elif rc != 0:
- module.fail_json(
- msg="Failure when getting Terraform outputs. "
- "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
- command=' '.join(outputs_command))
- else:
- outputs = json.loads(outputs_text)
-
- # Restore the Terraform workspace found when running the module
- if workspace_ctx["current"] != workspace:
- select_workspace(command[0], project_path, workspace_ctx["current"])
- if state == 'absent' and workspace != 'default' and purge_workspace is True:
- remove_workspace(command[0], project_path, workspace)
-
- module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
-
-
-if __name__ == '__main__':
- main()
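
The removed terraform module's build_plan() keys entirely off the `terraform plan -detailed-exitcode` contract: exit code 0 means the plan succeeded with no changes, 2 means it succeeded and changes are pending, and 1 (or anything else) means the plan itself failed. A minimal standalone sketch of that contract, outside any Ansible machinery; the function name and the assumption of a `terraform` binary on PATH with an already-initialized project directory are mine, not the module's:

import subprocess

def plan_has_changes(project_path):
    """Return True if a plan would change anything, False if not; raise on failure."""
    proc = subprocess.run(
        ['terraform', 'plan', '-input=false', '-no-color', '-detailed-exitcode'],
        cwd=project_path, capture_output=True, text=True,
    )
    if proc.returncode == 0:   # 0: plan succeeded, no changes pending
        return False
    if proc.returncode == 2:   # 2: plan succeeded, changes pending
        return True
    # 1 (or anything else): the plan itself failed
    raise RuntimeError('terraform plan failed:\n%s' % proc.stderr)
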
diff --git a/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py b/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
deleted file mode 100644
index f65e3c9a..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/misc/xenserver_facts.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: xenserver_facts
-short_description: get facts reported on xenserver
-description:
- - Reads data out of XenAPI, can be used instead of multiple xe commands.
-author:
- - Andy Hill (@andyhky)
- - Tim Rupp (@caphrim007)
- - Robin Lee (@cheese)
-options: {}
-'''
-
-EXAMPLES = '''
-- name: Gather facts from xenserver
- community.general.xenserver_facts:
-
-- name: Print running VMs
- ansible.builtin.debug:
- msg: "{{ item }}"
- with_items: "{{ xs_vms.keys() }}"
- when: xs_vms[item]['power_state'] == "Running"
-
-# Which will print:
-#
-# TASK: [Print running VMs] ***********************************************************
-# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
-# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
-# "item": "Control domain on host: 10.0.13.22",
-# "msg": "Control domain on host: 10.0.13.22"
-# }
-'''
-
-
-HAVE_XENAPI = False
-try:
- import XenAPI
- HAVE_XENAPI = True
-except ImportError:
- pass
-
-from ansible.module_utils import distro
-from ansible.module_utils.basic import AnsibleModule
-
-
-class XenServerFacts:
- def __init__(self):
- self.codes = {
- '5.5.0': 'george',
- '5.6.100': 'oxford',
- '6.0.0': 'boston',
- '6.1.0': 'tampa',
- '6.2.0': 'clearwater'
- }
-
- @property
- def version(self):
- result = distro.linux_distribution()[1]
- return result
-
- @property
- def codename(self):
- if self.version in self.codes:
- result = self.codes[self.version]
- else:
- result = None
-
- return result
-
-
-def get_xenapi_session():
- session = XenAPI.xapi_local()
- session.xenapi.login_with_password('', '')
- return session
-
-
-def get_networks(session):
- recs = session.xenapi.network.get_all_records()
- networks = change_keys(recs, key='name_label')
- return networks
-
-
-def get_pifs(session):
- recs = session.xenapi.PIF.get_all_records()
- pifs = change_keys(recs, key='uuid')
- xs_pifs = {}
- devicenums = range(0, 7) # check eth0..eth6 and their bond equivalents
- for pif in pifs.values():
- for eth in devicenums:
- interface_name = "eth%s" % (eth)
- bond_name = interface_name.replace('eth', 'bond')
- if pif['device'] == interface_name:
- xs_pifs[interface_name] = pif
- elif pif['device'] == bond_name:
- xs_pifs[bond_name] = pif
- return xs_pifs
-
-
-def get_vlans(session):
- recs = session.xenapi.VLAN.get_all_records()
- return change_keys(recs, key='tag')
-
-
-def change_keys(recs, key='uuid', filter_func=None):
- """
- Take a XenAPI record dict and make the keys the value of recs[ref][key].
-
- Preserves the ref in rec['ref']
-
- """
- new_recs = {}
-
- for ref, rec in recs.items():
- if filter_func is not None and not filter_func(rec):
- continue
-
- for param_name, param_value in rec.items():
- # param_value may be of type xmlrpc.client.DateTime,
- # which is not simply convertible to str.
- # Use the 'value' attr to get the str value,
- # following the example in the xmlrpc.client.DateTime documentation.
- if hasattr(param_value, "value"):
- rec[param_name] = param_value.value
- new_recs[rec[key]] = rec
- new_recs[rec[key]]['ref'] = ref
-
- return new_recs
-
-
-def get_host(session):
- """Get the host"""
- host_recs = session.xenapi.host.get_all()
- # We only have one host, so just return its entry
- return session.xenapi.host.get_record(host_recs[0])
-
-
-def get_vms(session):
- recs = session.xenapi.VM.get_all_records()
- if not recs:
- return None
- vms = change_keys(recs, key='name_label')
- return vms
-
-
-def get_srs(session):
- recs = session.xenapi.SR.get_all_records()
- if not recs:
- return None
- srs = change_keys(recs, key='name_label')
- return srs
-
-
-def main():
- module = AnsibleModule(
- supports_check_mode=True,
- )
-
- if not HAVE_XENAPI:
- module.fail_json(changed=False, msg="python xen api required for this module")
-
- obj = XenServerFacts()
- try:
- session = get_xenapi_session()
- except XenAPI.Failure as e:
- module.fail_json(msg='%s' % e)
-
- data = {
- 'xenserver_version': obj.version,
- 'xenserver_codename': obj.codename
- }
-
- xs_networks = get_networks(session)
- xs_pifs = get_pifs(session)
- xs_vlans = get_vlans(session)
- xs_vms = get_vms(session)
- xs_srs = get_srs(session)
-
- if xs_vlans:
- data['xs_vlans'] = xs_vlans
- if xs_pifs:
- data['xs_pifs'] = xs_pifs
- if xs_networks:
- data['xs_networks'] = xs_networks
-
- if xs_vms:
- data['xs_vms'] = xs_vms
-
- if xs_srs:
- data['xs_srs'] = xs_srs
-
- module.exit_json(ansible_facts=data)
-
-
-if __name__ == '__main__':
- main()
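
The heart of the removed xenserver_facts module is the change_keys() re-keying idiom: turn a XenAPI {ref: record} mapping into {record[key]: record}, preserving the original ref inside each record. A self-contained illustration of just that transformation; the sample records are invented for the example:

def change_keys(recs, key='uuid'):
    # Re-key {ref: record} by record[key], keeping the ref in record['ref'].
    new_recs = {}
    for ref, rec in recs.items():
        new_recs[rec[key]] = rec
        new_recs[rec[key]]['ref'] = ref
    return new_recs

networks = {
    'OpaqueRef:1': {'uuid': 'aaa-111', 'name_label': 'Pool-wide network'},
    'OpaqueRef:2': {'uuid': 'bbb-222', 'name_label': 'Host internal'},
}
by_name = change_keys(networks, key='name_label')
assert by_name['Host internal']['ref'] == 'OpaqueRef:2'
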
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
deleted file mode 100644
index d46ce388..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_firewall_policy.py
+++ /dev/null
@@ -1,579 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
- # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_firewall_policy
-short_description: Configure 1&1 firewall policy.
-description:
- - Create, remove, reconfigure, update firewall policies.
- This module has a dependency on 1and1 >= 1.0
-options:
- state:
- description:
- - Define a firewall policy state to create, remove, or update.
- required: false
- type: str
- default: 'present'
- choices: [ "present", "absent", "update" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- required: false
- name:
- description:
- - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
- maxLength=128
- type: str
- firewall_policy:
- description:
- - The identifier (id or name) of the firewall policy used with update state.
- type: str
- rules:
- description:
- - A list of rules that will be set for the firewall policy.
- Each rule must contain the protocol parameter, in addition to three optional parameters
- (port_from, port_to, and source).
- type: list
- elements: dict
- add_server_ips:
- description:
- - A list of server identifiers (id or name) to be assigned to a firewall policy.
- Used in combination with update state.
- type: list
- elements: str
- required: false
- remove_server_ips:
- description:
- - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
- type: list
- elements: str
- required: false
- add_rules:
- description:
- - A list of rules that will be added to an existing firewall policy.
- Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
- type: list
- elements: dict
- required: false
- remove_rules:
- description:
- - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
- type: list
- elements: str
- required: false
- description:
- description:
- - Firewall policy description. maxLength=256
- type: str
- required: false
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the _wait_for methods
- type: int
- default: 5
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
-'''
-
-EXAMPLES = '''
-- name: Create a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- name: ansible-firewall-policy
- description: Testing creation of firewall policies with ansible
- rules:
- -
- protocol: TCP
- port_from: 80
- port_to: 80
- source: 0.0.0.0
- wait: true
- wait_timeout: 500
-
-- name: Destroy a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- state: absent
- name: ansible-firewall-policy
-
-- name: Update a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- state: update
- firewall_policy: ansible-firewall-policy
- name: ansible-firewall-policy-updated
- description: Testing creation of firewall policies with ansible - updated
-
-- name: Add server to a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- firewall_policy: ansible-firewall-policy-updated
- add_server_ips:
- - server_identifier (id or name)
- - server_identifier #2 (id or name)
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Remove server from a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- firewall_policy: ansible-firewall-policy-updated
- remove_server_ips:
- - B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Add rules to a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- firewall_policy: ansible-firewall-policy-updated
- description: Adding rules to an existing firewall policy
- add_rules:
- -
- protocol: TCP
- port_from: 70
- port_to: 70
- source: 0.0.0.0
- -
- protocol: TCP
- port_from: 60
- port_to: 60
- source: 0.0.0.0
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Remove rules from a firewall policy
- community.general.oneandone_firewall_policy:
- auth_token: oneandone_private_api_key
- firewall_policy: ansible-firewall-policy-updated
- remove_rules:
- - rule_id #1
- - rule_id #2
- - ...
- wait: true
- wait_timeout: 500
- state: update
-'''
-
-RETURN = '''
-firewall_policy:
- description: Information about the firewall policy that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
- returned: always
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_firewall_policy,
- get_server,
- OneAndOneResources,
- wait_for_resource_creation_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-
-def _check_mode(module, result):
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
- """
- Assigns servers to a firewall policy.
- """
- try:
- attach_servers = []
-
- for _server_id in server_ids:
- server = get_server(oneandone_conn, _server_id, True)
- attach_server = oneandone.client.AttachServer(
- server_id=server['id'],
- server_ip_id=next(iter(server['ips'] or []), None)['id']
- )
- attach_servers.append(attach_server)
-
- if module.check_mode:
- if attach_servers:
- return True
- return False
-
- firewall_policy = oneandone_conn.attach_server_firewall_policy(
- firewall_id=firewall_id,
- server_ips=attach_servers)
- return firewall_policy
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
- """
- Unassigns a server/IP from a firewall policy.
- """
- try:
- if module.check_mode:
- firewall_server = oneandone_conn.get_firewall_server(
- firewall_id=firewall_id,
- server_ip_id=server_ip_id)
- if firewall_server:
- return True
- return False
-
- firewall_policy = oneandone_conn.remove_firewall_server(
- firewall_id=firewall_id,
- server_ip_id=server_ip_id)
- return firewall_policy
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
- """
- Adds new rules to a firewall policy.
- """
- try:
- firewall_rules = []
-
- for rule in rules:
- firewall_rule = oneandone.client.FirewallPolicyRule(
- protocol=rule['protocol'],
- port_from=rule['port_from'],
- port_to=rule['port_to'],
- source=rule['source'])
- firewall_rules.append(firewall_rule)
-
- if module.check_mode:
- firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
- if (firewall_rules and firewall_policy_id):
- return True
- return False
-
- firewall_policy = oneandone_conn.add_firewall_policy_rule(
- firewall_id=firewall_id,
- firewall_policy_rules=firewall_rules
- )
- return firewall_policy
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
- """
- Removes a rule from a firewall policy.
- """
- try:
- if module.check_mode:
- rule = oneandone_conn.get_firewall_policy_rule(
- firewall_id=firewall_id,
- rule_id=rule_id)
- if rule:
- return True
- return False
-
- firewall_policy = oneandone_conn.remove_firewall_rule(
- firewall_id=firewall_id,
- rule_id=rule_id
- )
- return firewall_policy
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def update_firewall_policy(module, oneandone_conn):
- """
- Updates a firewall policy based on input arguments.
- Firewall rules and server ips can be added/removed to/from
- firewall policy. Firewall policy name and description can be
- updated as well.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- firewall_policy_id = module.params.get('firewall_policy')
- name = module.params.get('name')
- description = module.params.get('description')
- add_server_ips = module.params.get('add_server_ips')
- remove_server_ips = module.params.get('remove_server_ips')
- add_rules = module.params.get('add_rules')
- remove_rules = module.params.get('remove_rules')
-
- changed = False
-
- firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
- if firewall_policy is None:
- _check_mode(module, False)
-
- if name or description:
- _check_mode(module, True)
- firewall_policy = oneandone_conn.modify_firewall(
- firewall_id=firewall_policy['id'],
- name=name,
- description=description)
- changed = True
-
- if add_server_ips:
- if module.check_mode:
- _check_mode(module, _add_server_ips(module,
- oneandone_conn,
- firewall_policy['id'],
- add_server_ips))
-
- firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
- changed = True
-
- if remove_server_ips:
- chk_changed = False
- for server_ip_id in remove_server_ips:
- if module.check_mode:
- chk_changed |= _remove_firewall_server(module,
- oneandone_conn,
- firewall_policy['id'],
- server_ip_id)
-
- _remove_firewall_server(module,
- oneandone_conn,
- firewall_policy['id'],
- server_ip_id)
- _check_mode(module, chk_changed)
- firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
- changed = True
-
- if add_rules:
- firewall_policy = _add_firewall_rules(module,
- oneandone_conn,
- firewall_policy['id'],
- add_rules)
- _check_mode(module, firewall_policy)
- changed = True
-
- if remove_rules:
- chk_changed = False
- for rule_id in remove_rules:
- if module.check_mode:
- chk_changed |= _remove_firewall_rule(module,
- oneandone_conn,
- firewall_policy['id'],
- rule_id)
-
- _remove_firewall_rule(module,
- oneandone_conn,
- firewall_policy['id'],
- rule_id)
- _check_mode(module, chk_changed)
- firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
- changed = True
-
- return (changed, firewall_policy)
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def create_firewall_policy(module, oneandone_conn):
- """
- Create a new firewall policy.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- name = module.params.get('name')
- description = module.params.get('description')
- rules = module.params.get('rules')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- firewall_rules = []
-
- for rule in rules:
- firewall_rule = oneandone.client.FirewallPolicyRule(
- protocol=rule['protocol'],
- port_from=rule['port_from'],
- port_to=rule['port_to'],
- source=rule['source'])
- firewall_rules.append(firewall_rule)
-
- firewall_policy_obj = oneandone.client.FirewallPolicy(
- name=name,
- description=description
- )
-
- _check_mode(module, True)
- firewall_policy = oneandone_conn.create_firewall_policy(
- firewall_policy=firewall_policy_obj,
- firewall_policy_rules=firewall_rules
- )
-
- if wait:
- wait_for_resource_creation_completion(
- oneandone_conn,
- OneAndOneResources.firewall_policy,
- firewall_policy['id'],
- wait_timeout,
- wait_interval)
-
- firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
- changed = True if firewall_policy else False
-
- _check_mode(module, False)
-
- return (changed, firewall_policy)
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def remove_firewall_policy(module, oneandone_conn):
- """
- Removes a firewall policy.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- fp_id = module.params.get('name')
- firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
- if module.check_mode:
- if firewall_policy_id is None:
- _check_mode(module, False)
- _check_mode(module, True)
- firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
-
- changed = True if firewall_policy else False
-
- return (changed, {
- 'id': firewall_policy['id'],
- 'name': firewall_policy['name']
- })
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str', no_log=True,
- default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- name=dict(type='str'),
- firewall_policy=dict(type='str'),
- description=dict(type='str'),
- rules=dict(type='list', elements="dict", default=[]),
- add_server_ips=dict(type='list', elements="str", default=[]),
- remove_server_ips=dict(type='list', elements="str", default=[]),
- add_rules=dict(type='list', elements="dict", default=[]),
- remove_rules=dict(type='list', elements="str", default=[]),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
- ),
- supports_check_mode=True
- )
-
- if not HAS_ONEANDONE_SDK:
- module.fail_json(msg='the 1and1 Python SDK is required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='The "auth_token" parameter or ' +
- 'ONEANDONE_AUTH_TOKEN environment variable is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(
- msg="'name' parameter is required to delete a firewall policy.")
- try:
- (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- elif state == 'update':
- if not module.params.get('firewall_policy'):
- module.fail_json(
- msg="'firewall_policy' parameter is required to update a firewall policy.")
- try:
- (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- elif state == 'present':
- for param in ('name', 'rules'):
- if not module.params.get(param):
- module.fail_json(
- msg="%s parameter is required for new firewall policies." % param)
- try:
- (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(changed=changed, firewall_policy=firewall_policy)
-
-
-if __name__ == '__main__':
- main()
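
All three removed oneandone modules share the same check-mode early-exit helper: _check_mode() reports the predicted changed value and stops before any API call is made. A generic sketch of that pattern as a tiny Ansible module; would_change() is a hypothetical stand-in for the real API lookups, the rest mirrors the deleted helper:

from ansible.module_utils.basic import AnsibleModule

def _check_mode(module, result):
    # In check mode, report the predicted 'changed' value and exit immediately.
    if module.check_mode:
        module.exit_json(changed=result)

def would_change(name):
    return name != 'already-present'   # placeholder decision logic

def main():
    module = AnsibleModule(
        argument_spec=dict(name=dict(type='str', required=True)),
        supports_check_mode=True,
    )
    _check_mode(module, would_change(module.params['name']))
    # ... real API calls would run here when not in check mode ...
    module.exit_json(changed=would_change(module.params['name']))

if __name__ == '__main__':
    main()
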
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
deleted file mode 100644
index 5f541a87..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_load_balancer.py
+++ /dev/null
@@ -1,683 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
- # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_load_balancer
-short_description: Configure 1&1 load balancer.
-description:
- - Create, remove, update load balancers.
- This module has a dependency on 1and1 >= 1.0
-options:
- state:
- description:
- - Define a load balancer state to create, remove, or update.
- type: str
- required: false
- default: 'present'
- choices: [ "present", "absent", "update" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1.
- type: str
- load_balancer:
- description:
- - The identifier (id or name) of the load balancer used with update state.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- required: false
- name:
- description:
- - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
- maxLength=128
- type: str
- health_check_test:
- description:
- - Type of the health check. At the moment, HTTP is not allowed.
- type: str
- choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
- health_check_interval:
- description:
- - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
- type: str
- health_check_path:
- description:
- URL to call for checking. Required for HTTP health check. maxLength=1000
- type: str
- required: false
- health_check_parse:
- description:
- - Regular expression to check. Required for HTTP health check. maxLength=64
- type: str
- required: false
- persistence:
- description:
- - Persistence.
- type: bool
- persistence_time:
- description:
- - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
- type: str
- method:
- description:
- - Balancing procedure.
- type: str
- choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
- datacenter:
- description:
- - ID or country code of the datacenter where the load balancer will be created.
- - If not specified, it defaults to I(US).
- type: str
- choices: [ "US", "ES", "DE", "GB" ]
- required: false
- rules:
- description:
- - A list of rule objects that will be set for the load balancer. Each rule must contain protocol,
- port_balancer, and port_server parameters, in addition to the source parameter, which is optional.
- type: list
- elements: dict
- description:
- description:
- - Description of the load balancer. maxLength=256
- type: str
- required: false
- add_server_ips:
- description:
- - A list of server identifiers (id or name) to be assigned to a load balancer.
- Used in combination with update state.
- type: list
- elements: str
- required: false
- remove_server_ips:
- description:
- - A list of server IP ids to be unassigned from a load balancer. Used in combination with update state.
- type: list
- elements: str
- required: false
- add_rules:
- description:
- - A list of rules that will be added to an existing load balancer.
- Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
- type: list
- elements: dict
- required: false
- remove_rules:
- description:
- - A list of rule ids that will be removed from an existing load balancer. Used in combination with update state.
- type: list
- elements: str
- required: false
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the _wait_for methods
- type: int
- default: 5
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - Amel Ajdinovic (@aajdinov)
- - Ethan Devenport (@edevenport)
-'''
-
-EXAMPLES = '''
-- name: Create a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- name: ansible load balancer
- description: Testing creation of load balancer with ansible
- health_check_test: TCP
- health_check_interval: 40
- persistence: true
- persistence_time: 1200
- method: ROUND_ROBIN
- datacenter: US
- rules:
- -
- protocol: TCP
- port_balancer: 80
- port_server: 80
- source: 0.0.0.0
- wait: true
- wait_timeout: 500
-
-- name: Destroy a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- name: ansible load balancer
- wait: true
- wait_timeout: 500
- state: absent
-
-- name: Update a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- load_balancer: ansible load balancer
- name: ansible load balancer updated
- description: Testing the update of a load balancer with ansible
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Add server to a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- load_balancer: ansible load balancer updated
- description: Adding server to a load balancer with ansible
- add_server_ips:
- - server identifier (id or name)
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Remove server from a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- load_balancer: ansible load balancer updated
- description: Removing server from a load balancer with ansible
- remove_server_ips:
- - B2504878540DBC5F7634EB00A07C1EBD (server's ip id)
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Add rules to a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- load_balancer: ansible load balancer updated
- description: Adding rules to a load balancer with ansible
- add_rules:
- -
- protocol: TCP
- port_balancer: 70
- port_server: 70
- source: 0.0.0.0
- -
- protocol: TCP
- port_balancer: 60
- port_server: 60
- source: 0.0.0.0
- wait: true
- wait_timeout: 500
- state: update
-
-- name: Remove rules from a load balancer
- community.general.oneandone_load_balancer:
- auth_token: oneandone_private_api_key
- load_balancer: ansible load balancer updated
- description: Removing rules from a load balancer with ansible
- remove_rules:
- - rule_id #1
- - rule_id #2
- - ...
- wait: true
- wait_timeout: 500
- state: update
-'''
-
-RETURN = '''
-load_balancer:
- description: Information about the load balancer that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
- returned: always
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_load_balancer,
- get_server,
- get_datacenter,
- OneAndOneResources,
- wait_for_resource_creation_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-DATACENTERS = ['US', 'ES', 'DE', 'GB']
-HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
-METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
-
-
-def _check_mode(module, result):
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
- """
- Assigns servers to a load balancer.
- """
- try:
- attach_servers = []
-
- for server_id in server_ids:
- server = get_server(oneandone_conn, server_id, True)
- attach_server = oneandone.client.AttachServer(
- server_id=server['id'],
- server_ip_id=next(iter(server['ips'] or []), None)['id']
- )
- attach_servers.append(attach_server)
-
- if module.check_mode:
- if attach_servers:
- return True
- return False
-
- load_balancer = oneandone_conn.attach_load_balancer_server(
- load_balancer_id=load_balancer_id,
- server_ips=attach_servers)
- return load_balancer
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
- """
- Unassigns a server/IP from a load balancer.
- """
- try:
- if module.check_mode:
- lb_server = oneandone_conn.get_load_balancer_server(
- load_balancer_id=load_balancer_id,
- server_ip_id=server_ip_id)
- if lb_server:
- return True
- return False
-
- load_balancer = oneandone_conn.remove_load_balancer_server(
- load_balancer_id=load_balancer_id,
- server_ip_id=server_ip_id)
- return load_balancer
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
- """
- Adds new rules to a load_balancer.
- """
- try:
- load_balancer_rules = []
-
- for rule in rules:
- load_balancer_rule = oneandone.client.LoadBalancerRule(
- protocol=rule['protocol'],
- port_balancer=rule['port_balancer'],
- port_server=rule['port_server'],
- source=rule['source'])
- load_balancer_rules.append(load_balancer_rule)
-
- if module.check_mode:
- lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
- if (load_balancer_rules and lb_id):
- return True
- return False
-
- load_balancer = oneandone_conn.add_load_balancer_rule(
- load_balancer_id=load_balancer_id,
- load_balancer_rules=load_balancer_rules
- )
-
- return load_balancer
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
- """
- Removes a rule from a load_balancer.
- """
- try:
- if module.check_mode:
- rule = oneandone_conn.get_load_balancer_rule(
- load_balancer_id=load_balancer_id,
- rule_id=rule_id)
- if rule:
- return True
- return False
-
- load_balancer = oneandone_conn.remove_load_balancer_rule(
- load_balancer_id=load_balancer_id,
- rule_id=rule_id
- )
- return load_balancer
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def update_load_balancer(module, oneandone_conn):
- """
- Updates a load_balancer based on input arguments.
- Load balancer rules and server ips can be added/removed to/from
- load balancer. Load balancer name, description, health_check_test,
- health_check_interval, persistence, persistence_time, and method
- can be updated as well.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- load_balancer_id = module.params.get('load_balancer')
- name = module.params.get('name')
- description = module.params.get('description')
- health_check_test = module.params.get('health_check_test')
- health_check_interval = module.params.get('health_check_interval')
- health_check_path = module.params.get('health_check_path')
- health_check_parse = module.params.get('health_check_parse')
- persistence = module.params.get('persistence')
- persistence_time = module.params.get('persistence_time')
- method = module.params.get('method')
- add_server_ips = module.params.get('add_server_ips')
- remove_server_ips = module.params.get('remove_server_ips')
- add_rules = module.params.get('add_rules')
- remove_rules = module.params.get('remove_rules')
-
- changed = False
-
- load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
- if load_balancer is None:
- _check_mode(module, False)
-
- if (name or description or health_check_test or health_check_interval or health_check_path or
- health_check_parse or persistence or persistence_time or method):
- _check_mode(module, True)
- load_balancer = oneandone_conn.modify_load_balancer(
- load_balancer_id=load_balancer['id'],
- name=name,
- description=description,
- health_check_test=health_check_test,
- health_check_interval=health_check_interval,
- health_check_path=health_check_path,
- health_check_parse=health_check_parse,
- persistence=persistence,
- persistence_time=persistence_time,
- method=method)
- changed = True
-
- if add_server_ips:
- if module.check_mode:
- _check_mode(module, _add_server_ips(module,
- oneandone_conn,
- load_balancer['id'],
- add_server_ips))
-
- load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
- changed = True
-
- if remove_server_ips:
- chk_changed = False
- for server_ip_id in remove_server_ips:
- if module.check_mode:
- chk_changed |= _remove_load_balancer_server(module,
- oneandone_conn,
- load_balancer['id'],
- server_ip_id)
-
- _remove_load_balancer_server(module,
- oneandone_conn,
- load_balancer['id'],
- server_ip_id)
- _check_mode(module, chk_changed)
- load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
- changed = True
-
- if add_rules:
- load_balancer = _add_load_balancer_rules(module,
- oneandone_conn,
- load_balancer['id'],
- add_rules)
- _check_mode(module, load_balancer)
- changed = True
-
- if remove_rules:
- chk_changed = False
- for rule_id in remove_rules:
- if module.check_mode:
- chk_changed |= _remove_load_balancer_rule(module,
- oneandone_conn,
- load_balancer['id'],
- rule_id)
-
- _remove_load_balancer_rule(module,
- oneandone_conn,
- load_balancer['id'],
- rule_id)
- _check_mode(module, chk_changed)
- load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
- changed = True
-
- return (changed, load_balancer)
-
-
-def create_load_balancer(module, oneandone_conn):
- """
- Create a new load_balancer.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- name = module.params.get('name')
- description = module.params.get('description')
- health_check_test = module.params.get('health_check_test')
- health_check_interval = module.params.get('health_check_interval')
- health_check_path = module.params.get('health_check_path')
- health_check_parse = module.params.get('health_check_parse')
- persistence = module.params.get('persistence')
- persistence_time = module.params.get('persistence_time')
- method = module.params.get('method')
- datacenter = module.params.get('datacenter')
- rules = module.params.get('rules')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- load_balancer_rules = []
-
- datacenter_id = None
- if datacenter is not None:
- datacenter_id = get_datacenter(oneandone_conn, datacenter)
- if datacenter_id is None:
- module.fail_json(
- msg='datacenter %s not found.' % datacenter)
-
- for rule in rules:
- load_balancer_rule = oneandone.client.LoadBalancerRule(
- protocol=rule['protocol'],
- port_balancer=rule['port_balancer'],
- port_server=rule['port_server'],
- source=rule['source'])
- load_balancer_rules.append(load_balancer_rule)
-
- _check_mode(module, True)
- load_balancer_obj = oneandone.client.LoadBalancer(
- health_check_path=health_check_path,
- health_check_parse=health_check_parse,
- name=name,
- description=description,
- health_check_test=health_check_test,
- health_check_interval=health_check_interval,
- persistence=persistence,
- persistence_time=persistence_time,
- method=method,
- datacenter_id=datacenter_id
- )
-
- load_balancer = oneandone_conn.create_load_balancer(
- load_balancer=load_balancer_obj,
- load_balancer_rules=load_balancer_rules
- )
-
- if wait:
- wait_for_resource_creation_completion(oneandone_conn,
- OneAndOneResources.load_balancer,
- load_balancer['id'],
- wait_timeout,
- wait_interval)
-
- load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
- changed = True if load_balancer else False
-
- _check_mode(module, False)
-
- return (changed, load_balancer)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def remove_load_balancer(module, oneandone_conn):
- """
- Removes a load_balancer.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- lb_id = module.params.get('name')
- load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
- if module.check_mode:
- if load_balancer_id is None:
- _check_mode(module, False)
- _check_mode(module, True)
- load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
-
- changed = True if load_balancer else False
-
- return (changed, {
- 'id': load_balancer['id'],
- 'name': load_balancer['name']
- })
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str', no_log=True,
- default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- load_balancer=dict(type='str'),
- name=dict(type='str'),
- description=dict(type='str'),
- health_check_test=dict(
- choices=HEALTH_CHECK_TESTS),
- health_check_interval=dict(type='str'),
- health_check_path=dict(type='str'),
- health_check_parse=dict(type='str'),
- persistence=dict(type='bool'),
- persistence_time=dict(type='str'),
- method=dict(
- choices=METHODS),
- datacenter=dict(
- choices=DATACENTERS),
- rules=dict(type='list', elements="dict", default=[]),
- add_server_ips=dict(type='list', elements="str", default=[]),
- remove_server_ips=dict(type='list', elements="str", default=[]),
- add_rules=dict(type='list', elements="dict", default=[]),
- remove_rules=dict(type='list', elements="str", default=[]),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
- ),
- supports_check_mode=True
- )
-
- if not HAS_ONEANDONE_SDK:
- module.fail_json(msg='the 1and1 Python SDK is required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='auth_token parameter is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(
- msg="'name' parameter is required for deleting a load balancer.")
- try:
- (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
- elif state == 'update':
- if not module.params.get('load_balancer'):
- module.fail_json(
- msg="'load_balancer' parameter is required for updating a load balancer.")
- try:
- (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- elif state == 'present':
- for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
- 'persistence_time', 'method', 'rules'):
- if not module.params.get(param):
- module.fail_json(
- msg="%s parameter is required for new load balancers." % param)
- try:
- (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- module.exit_json(changed=changed, load_balancer=load_balancer)
-
-
-if __name__ == '__main__':
- main()
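
A recurring step in both oneandone modules above is converting the playbook's list-of-dict rule parameters into SDK rule objects up front, so the API is called once with the complete list. A dependency-free sketch of that conversion; RuleStub is a stand-in for oneandone.client.LoadBalancerRule, and the sample rule is invented:

from dataclasses import dataclass

@dataclass
class RuleStub:
    protocol: str
    port_balancer: int
    port_server: int
    source: str

def build_rules(rule_dicts):
    # One SDK rule object per playbook dict, built before any API call.
    return [
        RuleStub(
            protocol=rule['protocol'],
            port_balancer=rule['port_balancer'],
            port_server=rule['port_server'],
            source=rule['source'],
        )
        for rule in rule_dicts
    ]

rules = build_rules([
    {'protocol': 'TCP', 'port_balancer': 80, 'port_server': 80, 'source': '0.0.0.0'},
])
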
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
deleted file mode 100644
index 28dd0d41..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_monitoring_policy.py
+++ /dev/null
@@ -1,1038 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
- # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_monitoring_policy
-short_description: Configure 1&1 monitoring policy.
-description:
- - Create, remove, update monitoring policies
- (and add/remove ports, processes, and servers).
- This module has a dependency on 1and1 >= 1.0
-options:
- state:
- description:
- - Define a monitoring policy's state to create, remove, update.
- type: str
- required: false
- default: present
- choices: [ "present", "absent", "update" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- required: false
- name:
- description:
- - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
- type: str
- monitoring_policy:
- description:
- - The identifier (id or name) of the monitoring policy used with update state.
- type: str
- agent:
- description:
- Set to true to use the agent.
- type: str
- email:
- description:
- - User's email. maxLength=128
- type: str
- description:
- description:
- - Monitoring policy description. maxLength=256
- type: str
- required: false
- thresholds:
- description:
- Monitoring policy thresholds. Each of the suboptions has warning and critical,
- which both have alert and value suboptions. Warning is used to set limits for
- warning alerts, critical is used to set critical alerts. alert enables the alert,
- and value sets the limit that triggers it when exceeded.
- type: list
- elements: dict
- suboptions:
- cpu:
- description:
- - Consumption limits of CPU.
- required: true
- ram:
- description:
- - Consumption limits of RAM.
- required: true
- disk:
- description:
- - Consumption limits of hard disk.
- required: true
- internal_ping:
- description:
- - Response limits of internal ping.
- required: true
- transfer:
- description:
- - Consumption limits for transfer.
- required: true
- ports:
- description:
- Array of ports that will be monitored.
- type: list
- elements: dict
- suboptions:
- protocol:
- description:
- - Internet protocol.
- choices: [ "TCP", "UDP" ]
- required: true
- port:
- description:
- - Port number. minimum=1, maximum=65535
- required: true
- alert_if:
- description:
- - Case of alert.
- choices: [ "RESPONDING", "NOT_RESPONDING" ]
- required: true
- email_notification:
- description:
- Set to true to send e-mail notifications.
- required: true
- processes:
- description:
- Array of processes that will be monitored.
- type: list
- elements: dict
- suboptions:
- process:
- description:
- - Name of the process. maxLength=50
- required: true
- alert_if:
- description:
- - Case of alert.
- choices: [ "RUNNING", "NOT_RUNNING" ]
- required: true
- add_ports:
- description:
- - Ports to add to the monitoring policy.
- type: list
- elements: dict
- required: false
- add_processes:
- description:
- - Processes to add to the monitoring policy.
- type: list
- elements: dict
- required: false
- add_servers:
- description:
- - Servers to add to the monitoring policy.
- type: list
- elements: str
- required: false
- remove_ports:
- description:
- - Ports to remove from the monitoring policy.
- type: list
- elements: str
- required: false
- remove_processes:
- description:
- - Processes to remove from the monitoring policy.
- type: list
- elements: str
- required: false
- remove_servers:
- description:
- - Servers to remove from the monitoring policy.
- type: list
- elements: str
- required: false
- update_ports:
- description:
- - Ports to be updated on the monitoring policy.
- type: list
- elements: dict
- required: false
- update_processes:
- description:
- - Processes to be updated on the monitoring policy.
- type: list
- elements: dict
- required: false
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the _wait_for methods
- type: int
- default: 5
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
-'''
-
-EXAMPLES = '''
-- name: Create a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- name: ansible monitoring policy
- description: Testing creation of a monitoring policy with ansible
- email: your@emailaddress.com
- agent: true
- thresholds:
- -
- cpu:
- warning:
- value: 80
- alert: false
- critical:
- value: 92
- alert: false
- -
- ram:
- warning:
- value: 80
- alert: false
- critical:
- value: 90
- alert: false
- -
- disk:
- warning:
- value: 80
- alert: false
- critical:
- value: 90
- alert: false
- -
- internal_ping:
- warning:
- value: 50
- alert: false
- critical:
- value: 100
- alert: false
- -
- transfer:
- warning:
- value: 1000
- alert: false
- critical:
- value: 2000
- alert: false
- ports:
- -
- protocol: TCP
- port: 22
- alert_if: RESPONDING
- email_notification: false
- processes:
- -
- process: test
- alert_if: NOT_RUNNING
- email_notification: false
- wait: true
-
-- name: Destroy a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- state: absent
- name: ansible monitoring policy
-
-- name: Update a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy
- name: ansible monitoring policy updated
- description: Testing creation of a monitoring policy with ansible updated
- email: another@emailaddress.com
- thresholds:
- -
- cpu:
- warning:
- value: 70
- alert: false
- critical:
- value: 90
- alert: false
- -
- ram:
- warning:
- value: 70
- alert: false
- critical:
- value: 80
- alert: false
- -
- disk:
- warning:
- value: 70
- alert: false
- critical:
- value: 80
- alert: false
- -
- internal_ping:
- warning:
- value: 60
- alert: false
- critical:
- value: 90
- alert: false
- -
- transfer:
- warning:
- value: 900
- alert: false
- critical:
- value: 1900
- alert: false
- wait: true
- state: update
-
-- name: Add a port to a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- add_ports:
- -
- protocol: TCP
- port: 33
- alert_if: RESPONDING
- email_notification: false
- wait: true
- state: update
-
-- name: Update existing ports of a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- update_ports:
- -
- id: existing_port_id
- protocol: TCP
- port: 34
- alert_if: RESPONDING
- email_notification: false
- -
- id: existing_port_id
- protocol: TCP
- port: 23
- alert_if: RESPONDING
- email_notification: false
- wait: true
- state: update
-
-- name: Remove a port from a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- remove_ports:
- - port_id
- state: update
-
-- name: Add a process to a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- add_processes:
- -
- process: test_2
- alert_if: NOT_RUNNING
- email_notification: false
- wait: true
- state: update
-
-- name: Update existing processes of a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- update_processes:
- -
- id: process_id
- process: test_1
- alert_if: NOT_RUNNING
- email_notification: false
- -
- id: process_id
- process: test_3
- alert_if: NOT_RUNNING
- email_notification: false
- wait: true
- state: update
-
-- name: Remove a process from a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- remove_processes:
- - process_id
- wait: true
- state: update
-
-- name: Add server to a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- add_servers:
- - server id or name
- wait: true
- state: update
-
-- name: Remove server from a monitoring policy
- community.general.oneandone_monitoring_policy:
- auth_token: oneandone_private_api_key
- monitoring_policy: ansible monitoring policy updated
- remove_servers:
- - server01
- wait: true
- state: update
-'''
-
-RETURN = '''
-monitoring_policy:
- description: Information about the monitoring policy that was processed
- type: dict
- sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
- returned: always
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_monitoring_policy,
- get_server,
- OneAndOneResources,
- wait_for_resource_creation_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-
-def _check_mode(module, result):
- """
- In check mode, exit immediately and report whether anything would have
- changed; otherwise this call is a no-op.
- """
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
- """
- Adds new ports to a monitoring policy.
- """
- try:
- monitoring_policy_ports = []
-
- for _port in ports:
- monitoring_policy_port = oneandone.client.Port(
- protocol=_port['protocol'],
- port=_port['port'],
- alert_if=_port['alert_if'],
- email_notification=_port['email_notification']
- )
- monitoring_policy_ports.append(monitoring_policy_port)
-
- if module.check_mode:
- if monitoring_policy_ports:
- return True
- return False
-
- monitoring_policy = oneandone_conn.add_port(
- monitoring_policy_id=monitoring_policy_id,
- ports=monitoring_policy_ports)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
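- # A sketch (values hypothetical, not part of the module): one 'ports'
- # entry from the playbook maps onto the 1and1 SDK Port object built above.
- #
- #   {'protocol': 'TCP', 'port': 22, 'alert_if': 'RESPONDING',
- #    'email_notification': False}
- #   -> oneandone.client.Port(protocol='TCP', port=22,
- #                            alert_if='RESPONDING', email_notification=False)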
-def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
- """
- Removes a port from a monitoring policy.
- """
- try:
- if module.check_mode:
- # Probe for the port instead of deleting it so that check mode
- # stays side-effect free (mirrors _delete_monitoring_policy_process).
- mp_port = oneandone_conn.get_monitoring_policy_port(
- monitoring_policy_id=monitoring_policy_id,
- port_id=port_id)
- if mp_port:
- return True
- return False
-
- monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
- monitoring_policy_id=monitoring_policy_id,
- port_id=port_id)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
- """
- Modifies a monitoring policy port.
- """
- try:
- if module.check_mode:
- cm_port = oneandone_conn.get_monitoring_policy_port(
- monitoring_policy_id=monitoring_policy_id,
- port_id=port_id)
- if cm_port:
- return True
- return False
-
- monitoring_policy_port = oneandone.client.Port(
- protocol=port['protocol'],
- port=port['port'],
- alert_if=port['alert_if'],
- email_notification=port['email_notification']
- )
-
- monitoring_policy = oneandone_conn.modify_port(
- monitoring_policy_id=monitoring_policy_id,
- port_id=port_id,
- port=monitoring_policy_port)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
- """
- Adds new processes to a monitoring policy.
- """
- try:
- monitoring_policy_processes = []
-
- for _process in processes:
- monitoring_policy_process = oneandone.client.Process(
- process=_process['process'],
- alert_if=_process['alert_if'],
- email_notification=_process['email_notification']
- )
- monitoring_policy_processes.append(monitoring_policy_process)
-
- if module.check_mode:
- mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
- if (monitoring_policy_processes and mp_id):
- return True
- return False
-
- monitoring_policy = oneandone_conn.add_process(
- monitoring_policy_id=monitoring_policy_id,
- processes=monitoring_policy_processes)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
- """
- Removes a process from a monitoring policy.
- """
- try:
- if module.check_mode:
- process = oneandone_conn.get_monitoring_policy_process(
- monitoring_policy_id=monitoring_policy_id,
- process_id=process_id
- )
- if process:
- return True
- return False
-
- monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
- monitoring_policy_id=monitoring_policy_id,
- process_id=process_id)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
- """
- Modifies a monitoring policy process.
- """
- try:
- if module.check_mode:
- cm_process = oneandone_conn.get_monitoring_policy_process(
- monitoring_policy_id=monitoring_policy_id,
- process_id=process_id)
- if cm_process:
- return True
- return False
-
- monitoring_policy_process = oneandone.client.Process(
- process=process['process'],
- alert_if=process['alert_if'],
- email_notification=process['email_notification']
- )
-
- monitoring_policy = oneandone_conn.modify_process(
- monitoring_policy_id=monitoring_policy_id,
- process_id=process_id,
- process=monitoring_policy_process)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
- """
- Attaches servers to a monitoring policy.
- """
- try:
- attach_servers = []
-
- for _server_id in servers:
- server_id = get_server(oneandone_conn, _server_id)
- attach_server = oneandone.client.AttachServer(
- server_id=server_id
- )
- attach_servers.append(attach_server)
-
- if module.check_mode:
- if attach_servers:
- return True
- return False
-
- monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
- monitoring_policy_id=monitoring_policy_id,
- servers=attach_servers)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
- """
- Detaches a server from a monitoring policy.
- """
- try:
- if module.check_mode:
- mp_server = oneandone_conn.get_monitoring_policy_server(
- monitoring_policy_id=monitoring_policy_id,
- server_id=server_id)
- if mp_server:
- return True
- return False
-
- monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
- monitoring_policy_id=monitoring_policy_id,
- server_id=server_id)
- return monitoring_policy
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def update_monitoring_policy(module, oneandone_conn):
- """
- Updates a monitoring_policy based on input arguments.
- Monitoring policy ports, processes, and servers can be added to or
- removed from a monitoring policy. Monitoring policy name, description, email,
- thresholds for cpu, ram, disk, transfer and internal_ping
- can be updated as well.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- monitoring_policy_id = module.params.get('monitoring_policy')
- name = module.params.get('name')
- description = module.params.get('description')
- email = module.params.get('email')
- thresholds = module.params.get('thresholds')
- add_ports = module.params.get('add_ports')
- update_ports = module.params.get('update_ports')
- remove_ports = module.params.get('remove_ports')
- add_processes = module.params.get('add_processes')
- update_processes = module.params.get('update_processes')
- remove_processes = module.params.get('remove_processes')
- add_servers = module.params.get('add_servers')
- remove_servers = module.params.get('remove_servers')
-
- changed = False
-
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
- if monitoring_policy is None:
- _check_mode(module, False)
-
- _monitoring_policy = oneandone.client.MonitoringPolicy(
- name=name,
- description=description,
- email=email
- )
-
- _thresholds = None
-
- if thresholds:
- threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
-
- _thresholds = []
- for threshold in thresholds:
- key = list(threshold.keys())[0]
- if key in threshold_entities:
- _threshold = oneandone.client.Threshold(
- entity=key,
- warning_value=threshold[key]['warning']['value'],
- warning_alert=str(threshold[key]['warning']['alert']).lower(),
- critical_value=threshold[key]['critical']['value'],
- critical_alert=str(threshold[key]['critical']['alert']).lower())
- _thresholds.append(_threshold)
-
- if name or description or email or thresholds:
- _check_mode(module, True)
- monitoring_policy = oneandone_conn.modify_monitoring_policy(
- monitoring_policy_id=monitoring_policy['id'],
- monitoring_policy=_monitoring_policy,
- thresholds=_thresholds)
- changed = True
-
- if add_ports:
- if module.check_mode:
- _check_mode(module, _add_ports(module,
- oneandone_conn,
- monitoring_policy['id'],
- add_ports))
-
- monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
- changed = True
-
- if update_ports:
- chk_changed = False
- for update_port in update_ports:
- if module.check_mode:
- chk_changed |= _modify_port(module,
- oneandone_conn,
- monitoring_policy['id'],
- update_port['id'],
- update_port)
-
- _modify_port(module,
- oneandone_conn,
- monitoring_policy['id'],
- update_port['id'],
- update_port)
- _check_mode(module, chk_changed)
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
- changed = True
-
- if remove_ports:
- chk_changed = False
- for port_id in remove_ports:
- if module.check_mode:
- chk_changed |= _delete_monitoring_policy_port(module,
- oneandone_conn,
- monitoring_policy['id'],
- port_id)
-
- _delete_monitoring_policy_port(module,
- oneandone_conn,
- monitoring_policy['id'],
- port_id)
- _check_mode(module, chk_changed)
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
- changed = True
-
- if add_processes:
- monitoring_policy = _add_processes(module,
- oneandone_conn,
- monitoring_policy['id'],
- add_processes)
- _check_mode(module, monitoring_policy)
- changed = True
-
- if update_processes:
- chk_changed = False
- for update_process in update_processes:
- if module.check_mode:
- chk_changed |= _modify_process(module,
- oneandone_conn,
- monitoring_policy['id'],
- update_process['id'],
- update_process)
-
- _modify_process(module,
- oneandone_conn,
- monitoring_policy['id'],
- update_process['id'],
- update_process)
- _check_mode(module, chk_changed)
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
- changed = True
-
- if remove_processes:
- chk_changed = False
- for process_id in remove_processes:
- if module.check_mode:
- chk_changed |= _delete_monitoring_policy_process(module,
- oneandone_conn,
- monitoring_policy['id'],
- process_id)
-
- _delete_monitoring_policy_process(module,
- oneandone_conn,
- monitoring_policy['id'],
- process_id)
- _check_mode(module, chk_changed)
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
- changed = True
-
- if add_servers:
- monitoring_policy = _attach_monitoring_policy_server(module,
- oneandone_conn,
- monitoring_policy['id'],
- add_servers)
- _check_mode(module, monitoring_policy)
- changed = True
-
- if remove_servers:
- chk_changed = False
- for _server_id in remove_servers:
- server_id = get_server(oneandone_conn, _server_id)
-
- if module.check_mode:
- chk_changed |= _detach_monitoring_policy_server(module,
- oneandone_conn,
- monitoring_policy['id'],
- server_id)
-
- _detach_monitoring_policy_server(module,
- oneandone_conn,
- monitoring_policy['id'],
- server_id)
- _check_mode(module, chk_changed)
- monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
- changed = True
-
- return (changed, monitoring_policy)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
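- # A sketch (values hypothetical) of how one 'thresholds' entry from the
- # playbook maps onto the SDK Threshold object built in the update and
- # create paths; note the alert booleans are lowered to strings.
- #
- #   {'cpu': {'warning': {'value': 80, 'alert': False},
- #            'critical': {'value': 92, 'alert': False}}}
- #   -> oneandone.client.Threshold(entity='cpu', warning_value=80,
- #                                 warning_alert='false', critical_value=92,
- #                                 critical_alert='false')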
-
-def create_monitoring_policy(module, oneandone_conn):
- """
- Creates a new monitoring policy.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- name = module.params.get('name')
- description = module.params.get('description')
- email = module.params.get('email')
- agent = module.params.get('agent')
- thresholds = module.params.get('thresholds')
- ports = module.params.get('ports')
- processes = module.params.get('processes')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- _monitoring_policy = oneandone.client.MonitoringPolicy(name,
- description,
- email,
- agent)
-
- _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
-
- threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
-
- _thresholds = []
- for threshold in thresholds:
- key = list(threshold.keys())[0]
- if key in threshold_entities:
- _threshold = oneandone.client.Threshold(
- entity=key,
- warning_value=threshold[key]['warning']['value'],
- warning_alert=str(threshold[key]['warning']['alert']).lower(),
- critical_value=threshold[key]['critical']['value'],
- critical_alert=str(threshold[key]['critical']['alert']).lower())
- _thresholds.append(_threshold)
-
- _ports = []
- for port in ports:
- _port = oneandone.client.Port(
- protocol=port['protocol'],
- port=port['port'],
- alert_if=port['alert_if'],
- email_notification=str(port['email_notification']).lower())
- _ports.append(_port)
-
- _processes = []
- for process in processes:
- _process = oneandone.client.Process(
- process=process['process'],
- alert_if=process['alert_if'],
- email_notification=str(process['email_notification']).lower())
- _processes.append(_process)
-
- _check_mode(module, True)
- monitoring_policy = oneandone_conn.create_monitoring_policy(
- monitoring_policy=_monitoring_policy,
- thresholds=_thresholds,
- ports=_ports,
- processes=_processes
- )
-
- if wait:
- wait_for_resource_creation_completion(
- oneandone_conn,
- OneAndOneResources.monitoring_policy,
- monitoring_policy['id'],
- wait_timeout,
- wait_interval)
-
- changed = True if monitoring_policy else False
-
- _check_mode(module, False)
-
- return (changed, monitoring_policy)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def remove_monitoring_policy(module, oneandone_conn):
- """
- Removes a monitoring policy.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- mp_id = module.params.get('name')
- monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
- if module.check_mode:
- if monitoring_policy_id is None:
- _check_mode(module, False)
- _check_mode(module, True)
- monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
-
- changed = True if monitoring_policy else False
-
- return (changed, {
- 'id': monitoring_policy['id'],
- 'name': monitoring_policy['name']
- })
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str', no_log=True,
- default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- name=dict(type='str'),
- monitoring_policy=dict(type='str'),
- agent=dict(type='str'),
- email=dict(type='str'),
- description=dict(type='str'),
- thresholds=dict(type='list', elements="dict", default=[]),
- ports=dict(type='list', elements="dict", default=[]),
- processes=dict(type='list', elements="dict", default=[]),
- add_ports=dict(type='list', elements="dict", default=[]),
- update_ports=dict(type='list', elements="dict", default=[]),
- remove_ports=dict(type='list', elements="str", default=[]),
- add_processes=dict(type='list', elements="dict", default=[]),
- update_processes=dict(type='list', elements="dict", default=[]),
- remove_processes=dict(type='list', elements="str", default=[]),
- add_servers=dict(type='list', elements="str", default=[]),
- remove_servers=dict(type='list', elements="str", default=[]),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
- ),
- supports_check_mode=True
- )
-
- if not HAS_ONEANDONE_SDK:
- module.fail_json(msg='1and1 required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='auth_token parameter is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(
- msg="'name' parameter is required to delete a monitoring policy.")
- try:
- (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
- elif state == 'update':
- if not module.params.get('monitoring_policy'):
- module.fail_json(
- msg="'monitoring_policy' parameter is required to update a monitoring policy.")
- try:
- (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- elif state == 'present':
- for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
- if not module.params.get(param):
- module.fail_json(
- msg="%s parameter is required for a new monitoring policy." % param)
- try:
- (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
deleted file mode 100644
index 6a16cf68..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_private_network.py
+++ /dev/null
@@ -1,457 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_private_network
-short_description: Configure 1&1 private networking.
-description:
- - Create, remove, reconfigure, or update a private network.
- This module has a dependency on 1and1 >= 1.0.
-options:
- state:
- description:
- - Define a network's state to create, remove, or update.
- type: str
- required: false
- default: 'present'
- choices: [ "present", "absent", "update" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1.
- type: str
- private_network:
- description:
- - The identifier (id or name) of the network used with update state.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- required: false
- name:
- description:
- - Private network name used with the present state; used as the identifier (id or name) with the absent state.
- type: str
- description:
- description:
- - Set a description for the network.
- type: str
- datacenter:
- description:
- - The identifier of the datacenter where the private network will be created
- type: str
- choices: [US, ES, DE, GB]
- network_address:
- description:
- - Set a private network space, e.g. 192.168.1.0.
- type: str
- subnet_mask:
- description:
- - Set the netmask for the private network, e.g. 255.255.255.0.
- type: str
- add_members:
- description:
- - List of server identifiers (name or id) to be added to the private network.
- type: list
- elements: str
- remove_members:
- description:
- - List of server identifiers (name or id) to be removed from the private network.
- type: list
- elements: str
- wait:
- description:
- - Wait for the instance to be in state 'running' before returning.
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the _wait_for methods
- type: int
- default: 5
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - Amel Ajdinovic (@aajdinov)
- - Ethan Devenport (@edevenport)
-'''
-
-EXAMPLES = '''
-- name: Create a private network
- community.general.oneandone_private_network:
- auth_token: oneandone_private_api_key
- name: backup_network
- description: Testing creation of a private network with ansible
- network_address: 70.35.193.100
- subnet_mask: 255.0.0.0
- datacenter: US
-
-- name: Destroy a private network
- community.general.oneandone_private_network:
- auth_token: oneandone_private_api_key
- state: absent
- name: backup_network
-
-- name: Modify the private network
- community.general.oneandone_private_network:
- auth_token: oneandone_private_api_key
- state: update
- private_network: backup_network
- network_address: 192.168.2.0
- subnet_mask: 255.255.255.0
-
-- name: Add members to the private network
- community.general.oneandone_private_network:
- auth_token: oneandone_private_api_key
- state: update
- private_network: backup_network
- add_members:
- - server identifier (id or name)
-
-- name: Remove members from the private network
- community.general.oneandone_private_network:
- auth_token: oneandone_private_api_key
- state: update
- private_network: backup_network
- remove_members:
- - server identifier (id or name)
-'''
-
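- # A sketch, separate from the module: the network_address/subnet_mask pair
- # shown in the examples can be sanity-checked locally with the standard
- # library before calling the API (values hypothetical).
- #
- #   import ipaddress
- #   net = ipaddress.ip_network('192.168.2.0/255.255.255.0')
- #   assert str(net.network_address) == '192.168.2.0'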
-RETURN = '''
-private_network:
- description: Information about the private network.
- type: dict
- sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
- returned: always
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_private_network,
- get_server,
- get_datacenter,
- OneAndOneResources,
- wait_for_resource_creation_completion,
- wait_for_resource_deletion_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-DATACENTERS = ['US', 'ES', 'DE', 'GB']
-
-
-def _check_mode(module, result):
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def _add_servers(module, oneandone_conn, name, members):
- """
- Attaches a list of servers to a private network.
- """
- try:
- private_network_id = get_private_network(oneandone_conn, name)
-
- if module.check_mode:
- if private_network_id and members:
- return True
- return False
-
- network = oneandone_conn.attach_private_network_servers(
- private_network_id=private_network_id,
- server_ids=members)
-
- return network
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def _remove_member(module, oneandone_conn, name, member_id):
- """
- Removes a single server from a private network.
- """
- try:
- private_network_id = get_private_network(oneandone_conn, name)
-
- if module.check_mode:
- if private_network_id:
- network_member = oneandone_conn.get_private_network_server(
- private_network_id=private_network_id,
- server_id=member_id)
- if network_member:
- return True
- return False
-
- network = oneandone_conn.remove_private_network_server(
- private_network_id=private_network_id,
- server_id=member_id)
-
- return network
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def create_network(module, oneandone_conn):
- """
- Create new private network
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- any network was added.
- """
- name = module.params.get('name')
- description = module.params.get('description')
- network_address = module.params.get('network_address')
- subnet_mask = module.params.get('subnet_mask')
- datacenter = module.params.get('datacenter')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- # datacenter is optional for this module, so keep datacenter_id bound
- # even when no datacenter is given; it is used in the create call below.
- datacenter_id = None
- if datacenter is not None:
- datacenter_id = get_datacenter(oneandone_conn, datacenter)
- if datacenter_id is None:
- module.fail_json(
- msg='datacenter %s not found.' % datacenter)
-
- try:
- _check_mode(module, True)
- network = oneandone_conn.create_private_network(
- private_network=oneandone.client.PrivateNetwork(
- name=name,
- description=description,
- network_address=network_address,
- subnet_mask=subnet_mask,
- datacenter_id=datacenter_id
- ))
-
- if wait:
- wait_for_resource_creation_completion(
- oneandone_conn,
- OneAndOneResources.private_network,
- network['id'],
- wait_timeout,
- wait_interval)
- network = get_private_network(oneandone_conn,
- network['id'],
- True)
-
- changed = True if network else False
-
- _check_mode(module, False)
-
- return (changed, network)
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def update_network(module, oneandone_conn):
- """
- Modifies a private network.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
- """
- try:
- _private_network_id = module.params.get('private_network')
- _name = module.params.get('name')
- _description = module.params.get('description')
- _network_address = module.params.get('network_address')
- _subnet_mask = module.params.get('subnet_mask')
- _add_members = module.params.get('add_members')
- _remove_members = module.params.get('remove_members')
-
- changed = False
-
- private_network = get_private_network(oneandone_conn,
- _private_network_id,
- True)
- if private_network is None:
- _check_mode(module, False)
-
- if _name or _description or _network_address or _subnet_mask:
- _check_mode(module, True)
- private_network = oneandone_conn.modify_private_network(
- private_network_id=private_network['id'],
- name=_name,
- description=_description,
- network_address=_network_address,
- subnet_mask=_subnet_mask)
- changed = True
-
- if _add_members:
- instances = []
-
- for member in _add_members:
- instance_id = get_server(oneandone_conn, member)
- instance_obj = oneandone.client.AttachServer(server_id=instance_id)
-
- instances.extend([instance_obj])
- private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
- _check_mode(module, private_network)
- changed = True
-
- if _remove_members:
- chk_changed = False
- for member in _remove_members:
- instance = get_server(oneandone_conn, member, True)
-
- if module.check_mode:
- chk_changed |= _remove_member(module,
- oneandone_conn,
- private_network['id'],
- instance['id'])
- _check_mode(module, instance and chk_changed)
-
- _remove_member(module,
- oneandone_conn,
- private_network['id'],
- instance['id'])
- private_network = get_private_network(oneandone_conn,
- private_network['id'],
- True)
- changed = True
-
- return (changed, private_network)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def remove_network(module, oneandone_conn):
- """
- Removes a private network.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object.
- """
- try:
- pn_id = module.params.get('name')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- private_network_id = get_private_network(oneandone_conn, pn_id)
- if module.check_mode:
- if private_network_id is None:
- _check_mode(module, False)
- _check_mode(module, True)
- private_network = oneandone_conn.delete_private_network(private_network_id)
- wait_for_resource_deletion_completion(oneandone_conn,
- OneAndOneResources.private_network,
- private_network['id'],
- wait_timeout,
- wait_interval)
-
- changed = True if private_network else False
-
- return (changed, {
- 'id': private_network['id'],
- 'name': private_network['name']
- })
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str', no_log=True,
- default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- private_network=dict(type='str'),
- name=dict(type='str'),
- description=dict(type='str'),
- network_address=dict(type='str'),
- subnet_mask=dict(type='str'),
- add_members=dict(type='list', elements="str", default=[]),
- remove_members=dict(type='list', elements="str", default=[]),
- datacenter=dict(
- choices=DATACENTERS),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
- ),
- supports_check_mode=True
- )
-
- if not HAS_ONEANDONE_SDK:
- module.fail_json(msg='1and1 required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='auth_token parameter is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(
- msg="'name' parameter is required for deleting a network.")
- try:
- (changed, private_network) = remove_network(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
- elif state == 'update':
- if not module.params.get('private_network'):
- module.fail_json(
- msg="'private_network' parameter is required for updating a network.")
- try:
- (changed, private_network) = update_network(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(
- msg="'name' parameter is required for new networks.")
- try:
- (changed, private_network) = create_network(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(changed=changed, private_network=private_network)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
deleted file mode 100644
index 96b1c9f3..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_public_ip.py
+++ /dev/null
@@ -1,342 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_public_ip
-short_description: Configure 1&1 public IPs.
-description:
- - Create, update, and remove public IPs.
- This module has a dependency on 1and1 >= 1.0.
-options:
- state:
- description:
- - Define a public IP state to create, remove, or update.
- type: str
- required: false
- default: 'present'
- choices: [ "present", "absent", "update" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- required: false
- reverse_dns:
- description:
- - Reverse DNS name (maxLength=256).
- type: str
- required: false
- datacenter:
- description:
- - ID of the datacenter where the IP will be created (only for unassigned IPs).
- type: str
- choices: [US, ES, DE, GB]
- default: US
- required: false
- type:
- description:
- - Type of IP. Currently, only IPV4 is available.
- type: str
- choices: ["IPV4", "IPV6"]
- default: 'IPV4'
- required: false
- public_ip_id:
- description:
- - The ID of the public IP used with update and delete states.
- type: str
- wait:
- description:
- - Wait for the instance to be in state 'running' before returning.
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the _wait_for methods
- type: int
- default: 5
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - Amel Ajdinovic (@aajdinov)
- - Ethan Devenport (@edevenport)
-'''
-
-EXAMPLES = '''
-- name: Create a public IP
- community.general.oneandone_public_ip:
- auth_token: oneandone_private_api_key
- reverse_dns: example.com
- datacenter: US
- type: IPV4
-
-- name: Update a public IP
- community.general.oneandone_public_ip:
- auth_token: oneandone_private_api_key
- public_ip_id: public ip id
- reverse_dns: secondexample.com
- state: update
-
-- name: Delete a public IP
- community.general.oneandone_public_ip:
- auth_token: oneandone_private_api_key
- public_ip_id: public ip id
- state: absent
-'''
-
-RETURN = '''
-public_ip:
- description: Information about the public IP that was processed
- type: dict
- sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
- returned: always
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_datacenter,
- get_public_ip,
- OneAndOneResources,
- wait_for_resource_creation_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-DATACENTERS = ['US', 'ES', 'DE', 'GB']
-
-TYPES = ['IPV4', 'IPV6']
-
-
-def _check_mode(module, result):
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def create_public_ip(module, oneandone_conn):
- """
- Create new public IP
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- any public IP was added.
- """
- reverse_dns = module.params.get('reverse_dns')
- datacenter = module.params.get('datacenter')
- ip_type = module.params.get('type')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- if datacenter is not None:
- datacenter_id = get_datacenter(oneandone_conn, datacenter)
- if datacenter_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='datacenter %s not found.' % datacenter)
-
- try:
- _check_mode(module, True)
- public_ip = oneandone_conn.create_public_ip(
- reverse_dns=reverse_dns,
- ip_type=ip_type,
- datacenter_id=datacenter_id)
-
- if wait:
- wait_for_resource_creation_completion(oneandone_conn,
- OneAndOneResources.public_ip,
- public_ip['id'],
- wait_timeout,
- wait_interval)
- public_ip = oneandone_conn.get_public_ip(public_ip['id'])
-
- changed = True if public_ip else False
-
- return (changed, public_ip)
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def update_public_ip(module, oneandone_conn):
- """
- Update a public IP
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- any public IP was changed.
- """
- reverse_dns = module.params.get('reverse_dns')
- public_ip_id = module.params.get('public_ip_id')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
- if public_ip is None:
- _check_mode(module, False)
- module.fail_json(
- msg='public IP %s not found.' % public_ip_id)
-
- try:
- _check_mode(module, True)
- public_ip = oneandone_conn.modify_public_ip(
- ip_id=public_ip['id'],
- reverse_dns=reverse_dns)
-
- if wait:
- wait_for_resource_creation_completion(oneandone_conn,
- OneAndOneResources.public_ip,
- public_ip['id'],
- wait_timeout,
- wait_interval)
- public_ip = oneandone_conn.get_public_ip(public_ip['id'])
-
- changed = True if public_ip else False
-
- return (changed, public_ip)
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def delete_public_ip(module, oneandone_conn):
- """
- Delete a public IP
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- any public IP was deleted.
- """
- public_ip_id = module.params.get('public_ip_id')
-
- public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
- if public_ip is None:
- _check_mode(module, False)
- module.fail_json(
- msg='public IP %s not found.' % public_ip_id)
-
- try:
- _check_mode(module, True)
- deleted_public_ip = oneandone_conn.delete_public_ip(
- ip_id=public_ip['id'])
-
- changed = True if deleted_public_ip else False
-
- return (changed, {
- 'id': public_ip['id']
- })
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str', no_log=True,
- default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- public_ip_id=dict(type='str'),
- reverse_dns=dict(type='str'),
- datacenter=dict(
- choices=DATACENTERS,
- default='US'),
- type=dict(
- choices=TYPES,
- default='IPV4'),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
- ),
- supports_check_mode=True
- )
-
- if not HAS_ONEANDONE_SDK:
- module.fail_json(msg='1and1 required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='auth_token parameter is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('public_ip_id'):
- module.fail_json(
- msg="'public_ip_id' parameter is required to delete a public ip.")
- try:
- (changed, public_ip) = delete_public_ip(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
- elif state == 'update':
- if not module.params.get('public_ip_id'):
- module.fail_json(
- msg="'public_ip_id' parameter is required to update a public ip.")
- try:
- (changed, public_ip) = update_public_ip(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- elif state == 'present':
- try:
- (changed, public_ip) = create_public_ip(module, oneandone_conn)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(changed=changed, public_ip=public_ip)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py b/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
deleted file mode 100644
index aa651bd7..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oneandone/oneandone_server.py
+++ /dev/null
@@ -1,707 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oneandone_server
-short_description: Create, destroy, start, stop, and reboot a 1&1 Host server.
-description:
- - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
- When a server is created, the module can optionally wait for it to be 'running' before returning.
-options:
- state:
- description:
- - Define a server's state to create, remove, start or stop it.
- type: str
- default: present
- choices: [ "present", "absent", "running", "stopped" ]
- auth_token:
- description:
- - Authenticating API token provided by 1&1. Overrides the
- ONEANDONE_AUTH_TOKEN environment variable.
- type: str
- api_url:
- description:
- - Custom API URL. Overrides the
- ONEANDONE_API_URL environment variable.
- type: str
- datacenter:
- description:
- - The datacenter location.
- type: str
- default: US
- choices: [ "US", "ES", "DE", "GB" ]
- hostname:
- description:
- - The hostname or ID of the server. Only used when state is 'present'.
- type: str
- description:
- description:
- - The description of the server.
- type: str
- appliance:
- description:
- - The operating system name or ID for the server.
- It is required only for 'present' state.
- type: str
- fixed_instance_size:
- description:
- - The instance size name or ID of the server.
- It is required only for 'present' state, and it is mutually exclusive with
- vcore, cores_per_processor, ram, and hdds parameters.
- - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
- type: str
- vcore:
- description:
- - The total number of processors.
- It must be provided with cores_per_processor, ram, and hdds parameters.
- type: int
- cores_per_processor:
- description:
- - The number of cores per processor.
- It must be provided with vcore, ram, and hdds parameters.
- type: int
- ram:
- description:
- - The amount of RAM memory.
- It must be provided with vcore, cores_per_processor, and hdds parameters.
- type: float
- hdds:
- description:
- - A list of hard disks with nested "size" and "is_main" properties.
- It must be provided with vcore, cores_per_processor, and ram parameters.
- type: list
- elements: dict
- private_network:
- description:
- - The private network name or ID.
- type: str
- firewall_policy:
- description:
- - The firewall policy name or ID.
- type: str
- load_balancer:
- description:
- - The load balancer name or ID.
- type: str
- monitoring_policy:
- description:
- - The monitoring policy name or ID.
- type: str
- server:
- description:
- - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
- type: str
- count:
- description:
- - The number of servers to create.
- type: int
- default: 1
- ssh_key:
- description:
- - User's public SSH key (contents, not path).
- type: raw
- server_type:
- description:
- - The type of server to be built.
- type: str
- default: "cloud"
- choices: [ "cloud", "baremetal", "k8s_node" ]
- wait:
- description:
- - Wait for the server to be in state 'running' before returning.
- Also used for delete operation (set to 'false' if you don't want to wait
- for each individual server to be deleted before moving on with
- other tasks).
- type: bool
- default: 'yes'
- wait_timeout:
- description:
- - How long before wait gives up, in seconds.
- type: int
- default: 600
- wait_interval:
- description:
- - Defines the number of seconds to wait when using the wait_for methods
- type: int
- default: 5
- auto_increment:
- description:
- - When creating multiple servers at once, whether to differentiate
- hostnames by appending a count after them or substituting the count
- where there is a %02d or %03d in the hostname string.
- type: bool
- default: 'yes'
-
-requirements:
- - "1and1"
- - "python >= 2.6"
-
-author:
- - "Amel Ajdinovic (@aajdinov)"
- - "Ethan Devenport (@edevenport)"
-
-'''
-
-EXAMPLES = '''
-- name: Create three servers and enumerate their names
- community.general.oneandone_server:
- auth_token: oneandone_private_api_key
- hostname: node%02d
- fixed_instance_size: XL
- datacenter: US
- appliance: C5A349786169F140BCBC335675014C08
- auto_increment: true
- count: 3
-
-- name: Create three servers, passing in an ssh_key
- community.general.oneandone_server:
- auth_token: oneandone_private_api_key
- hostname: node%02d
- vcore: 2
- cores_per_processor: 4
- ram: 8.0
- hdds:
- - size: 50
- is_main: false
- datacenter: ES
- appliance: C5A349786169F140BCBC335675014C08
- count: 3
- wait: yes
- wait_timeout: 600
- wait_interval: 10
- ssh_key: SSH_PUBLIC_KEY
-
-- name: Removing server
- community.general.oneandone_server:
- auth_token: oneandone_private_api_key
- state: absent
- server: 'node01'
-
-- name: Starting server
- community.general.oneandone_server:
- auth_token: oneandone_private_api_key
- state: running
- server: 'node01'
-
-- name: Stopping server
- community.general.oneandone_server:
- auth_token: oneandone_private_api_key
- state: stopped
- server: 'node01'
-'''
-
-RETURN = '''
-servers:
- description: Information about each server that was processed
- type: list
- sample: '[{"hostname": "my-server", "id": "server-id"}]'
- returned: always
-'''
-
-import os
-import time
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.oneandone import (
- get_datacenter,
- get_fixed_instance_size,
- get_appliance,
- get_private_network,
- get_monitoring_policy,
- get_firewall_policy,
- get_load_balancer,
- get_server,
- OneAndOneResources,
- wait_for_resource_creation_completion,
- wait_for_resource_deletion_completion
-)
-
-HAS_ONEANDONE_SDK = True
-
-try:
- import oneandone.client
-except ImportError:
- HAS_ONEANDONE_SDK = False
-
-DATACENTERS = ['US', 'ES', 'DE', 'GB']
-
-ONEANDONE_SERVER_STATES = (
- 'DEPLOYING',
- 'POWERED_OFF',
- 'POWERED_ON',
- 'POWERING_ON',
- 'POWERING_OFF',
-)
-
-
-def _check_mode(module, result):
- if module.check_mode:
- module.exit_json(
- changed=result
- )
-
-
-def _create_server(module, oneandone_conn, hostname, description,
- fixed_instance_size_id, vcore, cores_per_processor, ram,
- hdds, datacenter_id, appliance_id, ssh_key,
- private_network_id, firewall_policy_id, load_balancer_id,
- monitoring_policy_id, server_type, wait, wait_timeout,
- wait_interval):
-
- try:
- existing_server = get_server(oneandone_conn, hostname)
-
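- # Idempotency guard: if a server with this hostname already exists,
- # report "no change" in check mode and skip creation otherwise.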
- if existing_server:
- if module.check_mode:
- return False
- return None
-
- if module.check_mode:
- return True
-
- server = oneandone_conn.create_server(
- oneandone.client.Server(
- name=hostname,
- description=description,
- fixed_instance_size_id=fixed_instance_size_id,
- vcore=vcore,
- cores_per_processor=cores_per_processor,
- ram=ram,
- appliance_id=appliance_id,
- datacenter_id=datacenter_id,
- rsa_key=ssh_key,
- private_network_id=private_network_id,
- firewall_policy_id=firewall_policy_id,
- load_balancer_id=load_balancer_id,
- monitoring_policy_id=monitoring_policy_id,
- server_type=server_type), hdds)
-
- if wait:
- wait_for_resource_creation_completion(
- oneandone_conn,
- OneAndOneResources.server,
- server['id'],
- wait_timeout,
- wait_interval)
- server = oneandone_conn.get_server(server['id']) # refresh
-
- return server
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
-
-def _insert_network_data(server):
- for addr_data in server['ips']:
- if addr_data['type'] == 'IPV6':
- server['public_ipv6'] = addr_data['ip']
- elif addr_data['type'] == 'IPV4':
- server['public_ipv4'] = addr_data['ip']
- return server
-
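- # Shape this helper expects (hypothetical payload): given
- #   server['ips'] == [{'type': 'IPV4', 'ip': '10.5.132.106'},
- #                     {'type': 'IPV6', 'ip': '2001:db8::1'}]
- # it sets server['public_ipv4'] = '10.5.132.106' and
- # server['public_ipv6'] = '2001:db8::1'.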
-
-def create_server(module, oneandone_conn):
- """
- Create new server
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- any server was added, and a 'servers' attribute with the list of the
- created servers' hostname, id and ip addresses.
- """
- hostname = module.params.get('hostname')
- description = module.params.get('description')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
- fixed_instance_size = module.params.get('fixed_instance_size')
- vcore = module.params.get('vcore')
- cores_per_processor = module.params.get('cores_per_processor')
- ram = module.params.get('ram')
- hdds = module.params.get('hdds')
- datacenter = module.params.get('datacenter')
- appliance = module.params.get('appliance')
- ssh_key = module.params.get('ssh_key')
- private_network = module.params.get('private_network')
- monitoring_policy = module.params.get('monitoring_policy')
- firewall_policy = module.params.get('firewall_policy')
- load_balancer = module.params.get('load_balancer')
- server_type = module.params.get('server_type')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- datacenter_id = get_datacenter(oneandone_conn, datacenter)
- if datacenter_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='datacenter %s not found.' % datacenter)
-
- fixed_instance_size_id = None
- if fixed_instance_size:
- fixed_instance_size_id = get_fixed_instance_size(
- oneandone_conn,
- fixed_instance_size)
- if fixed_instance_size_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='fixed_instance_size %s not found.' % fixed_instance_size)
-
- appliance_id = get_appliance(oneandone_conn, appliance)
- if appliance_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='appliance %s not found.' % appliance)
-
- private_network_id = None
- if private_network:
- private_network_id = get_private_network(
- oneandone_conn,
- private_network)
- if private_network_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='private network %s not found.' % private_network)
-
- monitoring_policy_id = None
- if monitoring_policy:
- monitoring_policy_id = get_monitoring_policy(
- oneandone_conn,
- monitoring_policy)
- if monitoring_policy_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='monitoring policy %s not found.' % monitoring_policy)
-
- firewall_policy_id = None
- if firewall_policy:
- firewall_policy_id = get_firewall_policy(
- oneandone_conn,
- firewall_policy)
- if firewall_policy_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='firewall policy %s not found.' % firewall_policy)
-
- load_balancer_id = None
- if load_balancer:
- load_balancer_id = get_load_balancer(
- oneandone_conn,
- load_balancer)
- if load_balancer_id is None:
- _check_mode(module, False)
- module.fail_json(
- msg='load balancer %s not found.' % load_balancer)
-
- if auto_increment:
- hostnames = _auto_increment_hostname(count, hostname)
- descriptions = _auto_increment_description(count, description)
- else:
- hostnames = [hostname] * count
- descriptions = [description] * count
-
- hdd_objs = []
- if hdds:
- for hdd in hdds:
- hdd_objs.append(oneandone.client.Hdd(
- size=hdd['size'],
- is_main=hdd['is_main']
- ))
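- # The loop above turns playbook entries such as
- #   hdds: [{'size': 50, 'is_main': False}]
- # into the SDK objects the create call expects, e.g.
- #   [oneandone.client.Hdd(size=50, is_main=False)].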
-
- servers = []
- for index, name in enumerate(hostnames):
- server = _create_server(
- module=module,
- oneandone_conn=oneandone_conn,
- hostname=name,
- description=descriptions[index],
- fixed_instance_size_id=fixed_instance_size_id,
- vcore=vcore,
- cores_per_processor=cores_per_processor,
- ram=ram,
- hdds=hdd_objs,
- datacenter_id=datacenter_id,
- appliance_id=appliance_id,
- ssh_key=ssh_key,
- private_network_id=private_network_id,
- monitoring_policy_id=monitoring_policy_id,
- firewall_policy_id=firewall_policy_id,
- load_balancer_id=load_balancer_id,
- server_type=server_type,
- wait=wait,
- wait_timeout=wait_timeout,
- wait_interval=wait_interval)
- if server:
- servers.append(server)
-
- changed = False
-
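- # _check_mode() exits the module early only in check mode; in a normal
- # run the calls below are no-ops and execution continues.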
- if servers:
- for server in servers:
- if server:
- _check_mode(module, True)
- _check_mode(module, False)
- servers = [_insert_network_data(_server) for _server in servers]
- changed = True
-
- _check_mode(module, False)
-
- return (changed, servers)
-
-
-def remove_server(module, oneandone_conn):
- """
- Removes a server.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object.
-
- Returns a dictionary containing a 'changed' attribute indicating whether
- the server was removed, and a 'removed_server' attribute with
- the removed server's hostname and id.
- """
- server_id = module.params.get('server')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- changed = False
- removed_server = None
-
- server = get_server(oneandone_conn, server_id, True)
- if server:
- _check_mode(module, True)
- try:
- oneandone_conn.delete_server(server_id=server['id'])
- if wait:
- wait_for_resource_deletion_completion(oneandone_conn,
- OneAndOneResources.server,
- server['id'],
- wait_timeout,
- wait_interval)
- changed = True
- except Exception as ex:
- module.fail_json(
- msg="failed to terminate the server: %s" % str(ex))
-
- removed_server = {
- 'id': server['id'],
- 'hostname': server['name']
- }
- _check_mode(module, False)
-
- return (changed, removed_server)
-
-
-def startstop_server(module, oneandone_conn):
- """
- Starts or Stops a server.
-
- module : AnsibleModule object
- oneandone_conn: authenticated oneandone object.
-
- Returns a dictionary with a 'changed' attribute indicating whether
- anything has changed for the server as a result of this function
- being run, and a 'server' attribute with basic information for
- the server.
- """
- state = module.params.get('state')
- server_id = module.params.get('server')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- wait_interval = module.params.get('wait_interval')
-
- changed = False
-
- # Resolve server
- server = get_server(oneandone_conn, server_id, True)
- if server:
- # Attempt to change the server state, only if it's not already there
- # or on its way.
- try:
- if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
- _check_mode(module, True)
- oneandone_conn.modify_server_status(
- server_id=server['id'],
- action='POWER_OFF',
- method='SOFTWARE')
- elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
- _check_mode(module, True)
- oneandone_conn.modify_server_status(
- server_id=server['id'],
- action='POWER_ON',
- method='SOFTWARE')
- except Exception as ex:
- module.fail_json(
- msg="failed to set server %s to state %s: %s" % (
- server_id, state, str(ex)))
-
- _check_mode(module, False)
-
- # Make sure the server has reached the desired state
- if wait:
- operation_completed = False
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(wait_interval)
- server = oneandone_conn.get_server(server['id']) # refresh
- server_state = server['status']['state']
- if state == 'stopped' and server_state == 'POWERED_OFF':
- operation_completed = True
- break
- if state == 'running' and server_state == 'POWERED_ON':
- operation_completed = True
- break
- if not operation_completed:
- module.fail_json(
- msg="Timeout waiting for server %s to get to state %s" % (
- server_id, state))
-
- changed = True
- server = _insert_network_data(server)
-
- _check_mode(module, False)
-
- return (changed, server)
-
-
-def _auto_increment_hostname(count, hostname):
- """
- Allow a custom incremental count in the hostname when defined with the
-    string formatting (%) operator. Otherwise, increment using name-1,
-    name-2, name-3, and so forth.
- """
- if '%' not in hostname:
- hostname = "%s-%%01d" % hostname
-
- return [
- hostname % i
- for i in xrange(1, count + 1)
- ]
-
-
-def _auto_increment_description(count, description):
- """
- Allow the incremental count in the description when defined with the
- string formatting (%) operator. Otherwise, repeat the same description.
- """
- if '%' in description:
- return [
- description % i
- for i in xrange(1, count + 1)
- ]
- else:
- return [description] * count
-
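-# Illustrative behaviour of the two helpers above, derived from the code
-# rather than from upstream documentation:
-#   _auto_increment_hostname(3, 'web')      -> ['web-1', 'web-2', 'web-3']
-#   _auto_increment_hostname(2, 'node%02d') -> ['node01', 'node02']
-#   _auto_increment_description(2, 'vm %d') -> ['vm 1', 'vm 2']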
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str',
- default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
- no_log=True),
- api_url=dict(
- type='str',
- default=os.environ.get('ONEANDONE_API_URL')),
- hostname=dict(type='str'),
- description=dict(type='str'),
- appliance=dict(type='str'),
- fixed_instance_size=dict(type='str'),
- vcore=dict(type='int'),
- cores_per_processor=dict(type='int'),
- ram=dict(type='float'),
- hdds=dict(type='list', elements='dict'),
- count=dict(type='int', default=1),
- ssh_key=dict(type='raw', no_log=False),
- auto_increment=dict(type='bool', default=True),
- server=dict(type='str'),
- datacenter=dict(
- choices=DATACENTERS,
- default='US'),
- private_network=dict(type='str'),
- firewall_policy=dict(type='str'),
- load_balancer=dict(type='str'),
- monitoring_policy=dict(type='str'),
- server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- wait_interval=dict(type='int', default=5),
- state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
- ),
- supports_check_mode=True,
- mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
- ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
- required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
- )
-
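-    # Illustrative sketch of the sizing constraints declared above (values
-    # are hypothetical): a server is sized either by a flavor, e.g.
-    #   fixed_instance_size: M
-    # or by explicit hardware, in which case required_together demands all of
-    #   vcore: 2
-    #   cores_per_processor: 1
-    #   ram: 4
-    #   hdds: [{size: 40, is_main: true}]
-    # and mutually_exclusive forbids mixing the two styles.
-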
- if not HAS_ONEANDONE_SDK:
-        module.fail_json(msg='The 1and1 Python SDK (oneandone) is required for this module')
-
- if not module.params.get('auth_token'):
- module.fail_json(
- msg='The "auth_token" parameter or ' +
- 'ONEANDONE_AUTH_TOKEN environment variable is required.')
-
- if not module.params.get('api_url'):
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'))
- else:
- oneandone_conn = oneandone.client.OneAndOneService(
- api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('server'):
- module.fail_json(
- msg="'server' parameter is required for deleting a server.")
- try:
- (changed, servers) = remove_server(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- elif state in ('running', 'stopped'):
- if not module.params.get('server'):
- module.fail_json(
- msg="'server' parameter is required for starting/stopping a server.")
- try:
- (changed, servers) = startstop_server(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- elif state == 'present':
- for param in ('hostname',
- 'appliance',
- 'datacenter'):
- if not module.params.get(param):
- module.fail_json(
- msg="%s parameter is required for new server." % param)
- try:
- (changed, servers) = create_server(module, oneandone_conn)
- except Exception as ex:
- module.fail_json(msg=str(ex))
-
- module.exit_json(changed=changed, servers=servers)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
deleted file mode 100644
index cf218efd..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/online/online_server_info.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: online_server_info
-short_description: Gather information about Online servers.
-description:
- - Gather information about the servers.
- - U(https://www.online.net/en/dedicated-server)
-author:
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.online
-
-'''
-
-EXAMPLES = r'''
-- name: Gather Online server information
- community.general.online_server_info:
- api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.online_server_info }}"
-'''
-
-RETURN = r'''
-online_server_info:
- description:
- - Response from Online API.
- - "For more details please refer to: U(https://console.online.net/en/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "online_server_info": [
- {
- "abuse": "abuse@example.com",
- "anti_ddos": false,
- "bmc": {
- "session_key": null
- },
- "boot_mode": "normal",
- "contacts": {
- "owner": "foobar",
- "tech": "foobar"
- },
- "disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
- ],
- "drive_arrays": [
- {
- "disks": [
- {
- "$ref": "/api/v1/server/hardware/disk/68452"
- },
- {
- "$ref": "/api/v1/server/hardware/disk/68453"
- }
- ],
- "raid_controller": {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- },
- "raid_level": "RAID1"
- }
- ],
- "hardware_watch": true,
- "hostname": "sd-42",
- "id": 42,
- "ip": [
- {
- "address": "195.154.172.149",
- "mac": "28:92:4a:33:5e:c6",
- "reverse": "195-154-172-149.rev.poneytelecom.eu.",
- "switch_port_state": "up",
- "type": "public"
- },
- {
- "address": "10.90.53.212",
- "mac": "28:92:4a:33:5e:c7",
- "reverse": null,
- "switch_port_state": "up",
- "type": "private"
- }
- ],
- "last_reboot": "2018-08-23T08:32:03.000Z",
- "location": {
- "block": "A",
- "datacenter": "DC3",
- "position": 19,
- "rack": "A23",
- "room": "4 4-4"
- },
- "network": {
- "ip": [
- "195.154.172.149"
- ],
- "ipfo": [],
- "private": [
- "10.90.53.212"
- ]
- },
- "offer": "Pro-1-S-SATA",
- "os": {
- "name": "FreeBSD",
- "version": "11.1-RELEASE"
- },
- "power": "ON",
- "proactive_monitoring": false,
- "raid_controllers": [
- {
- "$ref": "/api/v1/server/hardware/raidController/9910"
- }
- ],
- "support": "Basic service level"
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.online import (
- Online, OnlineException, online_argument_spec
-)
-
-
-class OnlineServerInfo(Online):
-
- def __init__(self, module):
- super(OnlineServerInfo, self).__init__(module)
- self.name = 'api/v1/server'
-
- def _get_server_detail(self, server_path):
- try:
- return self.get(path=server_path).json
- except OnlineException as exc:
- self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
-
- def all_detailed_servers(self):
- servers_api_path = self.get_resources()
-
- server_data = (
- self._get_server_detail(server_api_path)
- for server_api_path in servers_api_path
- )
-
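-        # Note: _get_server_detail() fails the module on API errors, so the
-        # None filter below is defensive; the lazy generator above is only
-        # consumed by this list comprehension.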
- return [s for s in server_data if s is not None]
-
-
-def main():
- module = AnsibleModule(
- argument_spec=online_argument_spec(),
- supports_check_mode=True,
- )
-
- try:
- servers_info = OnlineServerInfo(module).all_detailed_servers()
- module.exit_json(
- online_server_info=servers_info
- )
- except OnlineException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py b/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
deleted file mode 100644
index cd1b6dfa..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/online/online_user_info.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
-module: online_user_info
-short_description: Gather information about Online user.
-description:
- - Gather information about the user.
-author:
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.online
-'''
-
-EXAMPLES = r'''
-- name: Gather Online user info
- community.general.online_user_info:
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.online_user_info }}"
-'''
-
-RETURN = r'''
-online_user_info:
- description:
- - Response from Online API.
- - "For more details please refer to: U(https://console.online.net/en/api/)."
- returned: success
- type: dict
- sample:
- "online_user_info": {
- "company": "foobar LLC",
- "email": "foobar@example.com",
- "first_name": "foo",
- "id": 42,
- "last_name": "bar",
- "login": "foobar"
- }
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.online import (
- Online, OnlineException, online_argument_spec
-)
-
-
-class OnlineUserInfo(Online):
-
- def __init__(self, module):
- super(OnlineUserInfo, self).__init__(module)
- self.name = 'api/v1/user'
-
-
-def main():
- module = AnsibleModule(
- argument_spec=online_argument_spec(),
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- online_user_info=OnlineUserInfo(module).get_resources()
- )
- except OnlineException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
deleted file mode 100644
index f205a40a..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_host.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2018 www.privaz.io Valletech AB
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: one_host
-
-short_description: Manages OpenNebula Hosts
-
-
-requirements:
- - pyone
-
-description:
- - "Manages OpenNebula Hosts"
-
-options:
- name:
- description:
- - Hostname of the machine to manage.
- required: true
- type: str
- state:
- description:
- - Takes the host to the desired lifecycle state.
- - If C(absent) the host will be deleted from the cluster.
- - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
- - If C(enabled) the host is fully operational.
- - C(disabled), e.g. to perform maintenance operations.
- - C(offline), host is totally offline.
- choices:
- - absent
- - present
- - enabled
- - disabled
- - offline
- default: present
- type: str
- im_mad_name:
- description:
-            - The name of the information manager; this value is taken from the oned.conf with the tag name IM_MAD (name).
- default: kvm
- type: str
- vmm_mad_name:
- description:
-            - The name of the virtual machine manager (VMM) mad; this value is taken from the oned.conf with the tag name VM_MAD (name).
- default: kvm
- type: str
- cluster_id:
- description:
- - The cluster ID.
- default: 0
- type: int
- cluster_name:
- description:
- - The cluster specified by name.
- type: str
- labels:
- description:
- - The labels for this host.
- type: list
- elements: str
- template:
- description:
- - The template or attribute changes to merge into the host template.
- aliases:
- - attributes
- type: dict
-
-extends_documentation_fragment:
-- community.general.opennebula
-
-
-author:
- - Rafael del Valle (@rvalle)
-'''
-
-EXAMPLES = '''
-- name: Create a new host in OpenNebula
- community.general.one_host:
- name: host1
- cluster_id: 1
- api_url: http://127.0.0.1:2633/RPC2
-
-- name: Create a host and adjust its template
- community.general.one_host:
- name: host2
- cluster_name: default
- template:
- LABELS:
- - gold
- - ssd
- RESERVED_CPU: -100
-'''
-
-# TODO: pending setting guidelines on returned values
-RETURN = '''
-'''
-
-# TODO: Documentation on valid state transitions is required to properly implement all valid cases
-# TODO: To be coherent with CLI this module should also provide "flush" functionality
-
-from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
-
-try:
- from pyone import HOST_STATES, HOST_STATUS
-except ImportError:
- pass # handled at module utils
-
-
-# Pseudo definitions...
-
-HOST_ABSENT = -99 # the host is absent (special case defined by this module)
-
-
-class HostModule(OpenNebulaModule):
-
- def __init__(self):
-
- argument_spec = dict(
- name=dict(type='str', required=True),
- state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
- im_mad_name=dict(type='str', default="kvm"),
- vmm_mad_name=dict(type='str', default="kvm"),
- cluster_id=dict(type='int', default=0),
- cluster_name=dict(type='str'),
- labels=dict(type='list', elements='str'),
- template=dict(type='dict', aliases=['attributes']),
- )
-
- mutually_exclusive = [
- ['cluster_id', 'cluster_name']
- ]
-
- OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
-
- def allocate_host(self):
- """
- Creates a host entry in OpenNebula
- Returns: True on success, fails otherwise.
-
- """
- if not self.one.host.allocate(self.get_parameter('name'),
- self.get_parameter('vmm_mad_name'),
- self.get_parameter('im_mad_name'),
- self.get_parameter('cluster_id')):
- self.fail(msg="could not allocate host")
- else:
- self.result['changed'] = True
- return True
-
- def wait_for_host_state(self, host, target_states):
- """
- Utility method that waits for a host state.
- Args:
-            host: the host object to wait on
-            target_states: list of states to wait for
-
- """
- return self.wait_for_state('host',
- lambda: self.one.host.info(host.ID).STATE,
- lambda s: HOST_STATES(s).name, target_states,
- invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
-
- def run(self, one, module, result):
-
- # Get the list of hosts
- host_name = self.get_parameter("name")
- host = self.get_host_by_name(host_name)
-
- # manage host state
- desired_state = self.get_parameter('state')
- if bool(host):
- current_state = host.STATE
- current_state_name = HOST_STATES(host.STATE).name
- else:
- current_state = HOST_ABSENT
- current_state_name = "ABSENT"
-
- # apply properties
- if desired_state == 'present':
- if current_state == HOST_ABSENT:
- self.allocate_host()
- host = self.get_host_by_name(host_name)
- self.wait_for_host_state(host, [HOST_STATES.MONITORED])
- elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
- self.fail(msg="invalid host state %s" % current_state_name)
-
- elif desired_state == 'enabled':
- if current_state == HOST_ABSENT:
- self.allocate_host()
- host = self.get_host_by_name(host_name)
- self.wait_for_host_state(host, [HOST_STATES.MONITORED])
- elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
- if one.host.status(host.ID, HOST_STATUS.ENABLED):
- self.wait_for_host_state(host, [HOST_STATES.MONITORED])
- result['changed'] = True
- else:
- self.fail(msg="could not enable host")
- elif current_state in [HOST_STATES.MONITORED]:
- pass
- else:
- self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
-
- elif desired_state == 'disabled':
- if current_state == HOST_ABSENT:
- self.fail(msg='absent host cannot be put in disabled state')
- elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
- if one.host.status(host.ID, HOST_STATUS.DISABLED):
- self.wait_for_host_state(host, [HOST_STATES.DISABLED])
- result['changed'] = True
- else:
- self.fail(msg="could not disable host")
- elif current_state in [HOST_STATES.DISABLED]:
- pass
- else:
- self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
-
- elif desired_state == 'offline':
- if current_state == HOST_ABSENT:
- self.fail(msg='absent host cannot be placed in offline state')
- elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
- if one.host.status(host.ID, HOST_STATUS.OFFLINE):
- self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
- result['changed'] = True
- else:
- self.fail(msg="could not set host offline")
- elif current_state in [HOST_STATES.OFFLINE]:
- pass
- else:
- self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
-
- elif desired_state == 'absent':
- if current_state != HOST_ABSENT:
- if one.host.delete(host.ID):
- result['changed'] = True
- else:
- self.fail(msg="could not delete host from cluster")
-
- # if we reach this point we can assume that the host was taken to the desired state
-
- if desired_state != "absent":
- # manipulate or modify the template
- desired_template_changes = self.get_parameter('template')
-
- if desired_template_changes is None:
- desired_template_changes = dict()
-
- # complete the template with specific ansible parameters
- if self.is_parameter('labels'):
- desired_template_changes['LABELS'] = self.get_parameter('labels')
-
- if self.requires_template_update(host.TEMPLATE, desired_template_changes):
- # setup the root element so that pyone will generate XML instead of attribute vector
- desired_template_changes = {"TEMPLATE": desired_template_changes}
- if one.host.update(host.ID, desired_template_changes, 1): # merge the template
- result['changed'] = True
- else:
- self.fail(msg="failed to update the host template")
-
- # the cluster
- if host.CLUSTER_ID != self.get_parameter('cluster_id'):
- if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
- result['changed'] = True
- else:
- self.fail(msg="failed to update the host cluster")
-
- # return
- self.exit()
-
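-# Summary of the state transitions handled in run() above, as implemented:
-#   present  : allocate if absent; fail if the host is in an error state
-#   enabled  : allocate if absent; DISABLED/OFFLINE -> MONITORED
-#   disabled : MONITORED/OFFLINE -> DISABLED (fails if absent)
-#   offline  : MONITORED/DISABLED -> OFFLINE (fails if absent)
-#   absent   : delete the host if it exists
-# For any non-absent state the template and cluster are then reconciled.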
-
-def main():
- HostModule().run_module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
deleted file mode 100644
index 5a80306f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image.py
+++ /dev/null
@@ -1,423 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-(c) 2018, Milan Ilic
-
-This file is part of Ansible
-
-Ansible is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_image
-short_description: Manages OpenNebula images
-description:
- - Manages OpenNebula images
-requirements:
- - pyone
-options:
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- - transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- type: str
- api_username:
- description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
- type: str
- api_password:
- description:
- - Password of the user to login into OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
- type: str
- id:
- description:
-            - The C(id) of the image you would like to manage.
- type: int
- name:
- description:
-            - The C(name) of the image you would like to manage.
- type: str
- state:
- description:
- - C(present) - state that is used to manage the image
- - C(absent) - delete the image
- - C(cloned) - clone the image
- - C(renamed) - rename the image to the C(new_name)
- choices: ["present", "absent", "cloned", "renamed"]
- default: present
- type: str
- enabled:
- description:
- - Whether the image should be enabled or disabled.
- type: bool
- new_name:
- description:
- - A name that will be assigned to the existing or new image.
-            - In the case of cloning, by default C(new_name) will take the name of the original image with the prefix 'Copy of'.
- type: str
-author:
- - "Milan Ilic (@ilicmilan)"
-'''
-
-EXAMPLES = '''
-- name: Fetch the IMAGE by id
- community.general.one_image:
- id: 45
- register: result
-
-- name: Print the IMAGE properties
- ansible.builtin.debug:
- var: result
-
-- name: Rename existing IMAGE
- community.general.one_image:
- id: 34
- state: renamed
- new_name: bar-image
-
-- name: Disable the IMAGE by id
- community.general.one_image:
- id: 37
- enabled: no
-
-- name: Enable the IMAGE by name
- community.general.one_image:
- name: bar-image
- enabled: yes
-
-- name: Clone the IMAGE by name
- community.general.one_image:
- name: bar-image
- state: cloned
- new_name: bar-image-clone
- register: result
-
-- name: Delete the IMAGE by id
- community.general.one_image:
- id: '{{ result.id }}'
- state: absent
-'''
-
-RETURN = '''
-id:
- description: image id
- type: int
- returned: success
- sample: 153
-name:
- description: image name
- type: str
- returned: success
- sample: app1
-group_id:
- description: image's group id
- type: int
- returned: success
- sample: 1
-group_name:
- description: image's group name
- type: str
- returned: success
- sample: one-users
-owner_id:
- description: image's owner id
- type: int
- returned: success
- sample: 143
-owner_name:
- description: image's owner name
- type: str
- returned: success
- sample: ansible-test
-state:
- description: state of image instance
- type: str
- returned: success
- sample: READY
-used:
- description: is image in use
- type: bool
- returned: success
- sample: true
-running_vms:
- description: count of running vms that use this image
- type: int
- returned: success
- sample: 7
-'''
-
-try:
- import pyone
- HAS_PYONE = True
-except ImportError:
- HAS_PYONE = False
-
-from ansible.module_utils.basic import AnsibleModule
-import os
-
-
-def get_image(module, client, predicate):
-    # Filter -2 means fetch all images the user has USE rights on
- pool = client.imagepool.info(-2, -1, -1, -1)
-
- for image in pool.IMAGE:
- if predicate(image):
- return image
-
- return None
-
-
-def get_image_by_name(module, client, image_name):
- return get_image(module, client, lambda image: (image.NAME == image_name))
-
-
-def get_image_by_id(module, client, image_id):
- return get_image(module, client, lambda image: (image.ID == image_id))
-
-
-def get_image_instance(module, client, requested_id, requested_name):
- if requested_id:
- return get_image_by_id(module, client, requested_id)
- else:
- return get_image_by_name(module, client, requested_name)
-
-
-IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
-
-
-def get_image_info(image):
- info = {
- 'id': image.ID,
- 'name': image.NAME,
- 'state': IMAGE_STATES[image.STATE],
- 'running_vms': image.RUNNING_VMS,
- 'used': bool(image.RUNNING_VMS),
- 'user_name': image.UNAME,
- 'user_id': image.UID,
- 'group_name': image.GNAME,
- 'group_id': image.GID,
- }
-
- return info
-
-
-def wait_for_state(module, client, image_id, wait_timeout, state_predicate):
- import time
- start_time = time.time()
-
- while (time.time() - start_time) < wait_timeout:
- image = client.image.info(image_id)
- state = image.STATE
-
- if state_predicate(state):
- return image
-
- time.sleep(1)
-
- module.fail_json(msg="Wait timeout has expired!")
-
-
-def wait_for_ready(module, client, image_id, wait_timeout=60):
- return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
-
-
-def wait_for_delete(module, client, image_id, wait_timeout=60):
- return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
-
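-# Sanity check of the state mapping used by the two wrappers above, following
-# directly from the IMAGE_STATES list: IMAGE_STATES.index('READY') == 1 and
-# IMAGE_STATES.index('DELETE') == 7, so they poll once per second until
-# image.STATE reaches that index or wait_timeout (60s by default) expires.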
-
-def enable_image(module, client, image, enable):
- image = client.image.info(image.ID)
- changed = False
-
- state = image.STATE
-
- if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
- if enable:
- module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
- else:
- module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
-
- if ((enable and state != IMAGE_STATES.index('READY')) or
- (not enable and state != IMAGE_STATES.index('DISABLED'))):
- changed = True
-
- if changed and not module.check_mode:
- client.image.enable(image.ID, enable)
-
- result = get_image_info(image)
- result['changed'] = changed
-
- return result
-
-
-def clone_image(module, client, image, new_name):
- if new_name is None:
- new_name = "Copy of " + image.NAME
-
- tmp_image = get_image_by_name(module, client, new_name)
- if tmp_image:
- result = get_image_info(tmp_image)
- result['changed'] = False
- return result
-
- if image.STATE == IMAGE_STATES.index('DISABLED'):
- module.fail_json(msg="Cannot clone DISABLED image")
-
- if not module.check_mode:
- new_id = client.image.clone(image.ID, new_name)
- wait_for_ready(module, client, new_id)
- image = client.image.info(new_id)
-
- result = get_image_info(image)
- result['changed'] = True
-
- return result
-
-
-def rename_image(module, client, image, new_name):
- if new_name is None:
- module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
-
- if new_name == image.NAME:
- result = get_image_info(image)
- result['changed'] = False
- return result
-
- tmp_image = get_image_by_name(module, client, new_name)
- if tmp_image:
- module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID))
-
- if not module.check_mode:
- client.image.rename(image.ID, new_name)
-
- result = get_image_info(image)
- result['changed'] = True
- return result
-
-
-def delete_image(module, client, image):
-
- if not image:
- return {'changed': False}
-
- if image.RUNNING_VMS > 0:
- module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.")
-
- if not module.check_mode:
- client.image.delete(image.ID)
- wait_for_delete(module, client, image.ID)
-
- return {'changed': True}
-
-
-def get_connection_info(module):
-
- url = module.params.get('api_url')
- username = module.params.get('api_username')
- password = module.params.get('api_password')
-
- if not url:
- url = os.environ.get('ONE_URL')
-
- if not username:
- username = os.environ.get('ONE_USERNAME')
-
- if not password:
- password = os.environ.get('ONE_PASSWORD')
-
- if not(url and username and password):
- module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
- from collections import namedtuple
-
- auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
- return auth_params(url=url, username=username, password=password)
-
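-# Instead of module parameters, the same credentials can be supplied via the
-# environment (the endpoint below is a hypothetical example):
-#   export ONE_URL=https://one.example.com:2633/RPC2
-#   export ONE_USERNAME=oneadmin
-#   export ONE_PASSWORD=secret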
-
-def main():
- fields = {
- "api_url": {"required": False, "type": "str"},
- "api_username": {"required": False, "type": "str"},
- "api_password": {"required": False, "type": "str", "no_log": True},
- "id": {"required": False, "type": "int"},
- "name": {"required": False, "type": "str"},
- "state": {
- "default": "present",
- "choices": ['present', 'absent', 'cloned', 'renamed'],
- "type": "str"
- },
- "enabled": {"required": False, "type": "bool"},
- "new_name": {"required": False, "type": "str"},
- }
-
- module = AnsibleModule(argument_spec=fields,
- mutually_exclusive=[['id', 'name']],
- supports_check_mode=True)
-
- if not HAS_PYONE:
- module.fail_json(msg='This module requires pyone to work!')
-
- auth = get_connection_info(module)
- params = module.params
- id = params.get('id')
- name = params.get('name')
- state = params.get('state')
- enabled = params.get('enabled')
- new_name = params.get('new_name')
- client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
-
- result = {}
-
- if not id and state == 'renamed':
- module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
-
- image = get_image_instance(module, client, id, name)
- if not image and state != 'absent':
- if id:
- module.fail_json(msg="There is no image with id=" + str(id))
- else:
- module.fail_json(msg="There is no image with name=" + name)
-
- if state == 'absent':
- result = delete_image(module, client, image)
- else:
- result = get_image_info(image)
- changed = False
- result['changed'] = False
-
- if enabled is not None:
- result = enable_image(module, client, image, enabled)
- if state == "cloned":
- result = clone_image(module, client, image, new_name)
- elif state == "renamed":
- result = rename_image(module, client, image, new_name)
-
- changed = changed or result['changed']
- result['changed'] = changed
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
deleted file mode 100644
index e03b8ad7..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_image_info.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-(c) 2018, Milan Ilic
-
-This file is part of Ansible
-
-Ansible is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_image_info
-short_description: Gather information on OpenNebula images
-description:
- - Gather information on OpenNebula images.
- - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
-requirements:
- - pyone
-options:
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- - transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- type: str
- api_username:
- description:
- - Name of the user to login into the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
- type: str
- api_password:
- description:
- - Password of the user to login into OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
- type: str
- ids:
- description:
-            - A list of image ids whose facts you want to gather.
- aliases: ['id']
- type: list
- elements: str
- name:
- description:
-            - The C(name) of the image whose facts will be gathered.
-            - If the C(name) begins with C(~), it is used as a regex pattern
-            - that restricts the returned list to images whose names match the regex.
-            - Also, if the C(name) begins with C(~*), the matching is case-insensitive.
- - See examples for more details.
- type: str
-author:
- - "Milan Ilic (@ilicmilan)"
- - "Jan Meerkamp (@meerkampdvv)"
-'''
-
-EXAMPLES = '''
-- name: Gather facts about all images
- community.general.one_image_info:
- register: result
-
-- name: Print all images facts
- ansible.builtin.debug:
-    var: result
-
-- name: Gather facts about an image using ID
- community.general.one_image_info:
- ids:
- - 123
-
-- name: Gather facts about an image using the name
- community.general.one_image_info:
- name: 'foo-image'
- register: foo_image
-
-- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
- community.general.one_image_info:
- name: '~app-image-.*'
- register: app_images
-
-- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
- community.general.one_image_info:
- name: '~*foo-image-.*'
- register: foo_images
-'''
-
-RETURN = '''
-images:
-    description: A list of image info dictionaries
- type: complex
- returned: success
- contains:
- id:
- description: image id
- type: int
- sample: 153
- name:
- description: image name
- type: str
- sample: app1
- group_id:
- description: image's group id
- type: int
- sample: 1
- group_name:
- description: image's group name
- type: str
- sample: one-users
- owner_id:
- description: image's owner id
- type: int
- sample: 143
- owner_name:
- description: image's owner name
- type: str
- sample: ansible-test
- state:
- description: state of image instance
- type: str
- sample: READY
- used:
- description: is image in use
- type: bool
- sample: true
- running_vms:
- description: count of running vms that use this image
- type: int
- sample: 7
-'''
-
-try:
- import pyone
- HAS_PYONE = True
-except ImportError:
- HAS_PYONE = False
-
-from ansible.module_utils.basic import AnsibleModule
-import os
-
-
-def get_all_images(client):
- pool = client.imagepool.info(-2, -1, -1, -1)
-    # Filter -2 means fetch all images the user has USE rights on
-
- return pool
-
-
-IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
-
-
-def get_image_info(image):
- info = {
- 'id': image.ID,
- 'name': image.NAME,
- 'state': IMAGE_STATES[image.STATE],
- 'running_vms': image.RUNNING_VMS,
- 'used': bool(image.RUNNING_VMS),
- 'user_name': image.UNAME,
- 'user_id': image.UID,
- 'group_name': image.GNAME,
- 'group_id': image.GID,
- }
- return info
-
-
-def get_images_by_ids(module, client, ids):
- images = []
- pool = get_all_images(client)
-
- for image in pool.IMAGE:
- if str(image.ID) in ids:
- images.append(image)
- ids.remove(str(image.ID))
- if len(ids) == 0:
- break
-
- if len(ids) > 0:
-        module.fail_json(msg='There are no IMAGE(s) with id(s)=' + ', '.join(str(image_id) for image_id in ids))
-
- return images
-
-
-def get_images_by_name(module, client, name_pattern):
-
- images = []
- pattern = None
-
- pool = get_all_images(client)
-
- if name_pattern.startswith('~'):
- import re
- if name_pattern[1] == '*':
- pattern = re.compile(name_pattern[2:], re.IGNORECASE)
- else:
- pattern = re.compile(name_pattern[1:])
-
- for image in pool.IMAGE:
- if pattern is not None:
- if pattern.match(image.NAME):
- images.append(image)
- elif name_pattern == image.NAME:
- images.append(image)
- break
-
-    # an exact (non-regex) name was given but no image matched
- if pattern is None and len(images) == 0:
- module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
-
- return images
-
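-# Examples of the name-matching protocol implemented above (the image names
-# are hypothetical):
-#   name: 'app1'      -> exact match only
-#   name: '~app-.*'   -> case-sensitive regex match
-#   name: '~*app-.*'  -> case-insensitive regex match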
-
-def get_connection_info(module):
-
- url = module.params.get('api_url')
- username = module.params.get('api_username')
- password = module.params.get('api_password')
-
- if not url:
- url = os.environ.get('ONE_URL')
-
- if not username:
- username = os.environ.get('ONE_USERNAME')
-
- if not password:
- password = os.environ.get('ONE_PASSWORD')
-
- if not(url and username and password):
- module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
- from collections import namedtuple
-
- auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
- return auth_params(url=url, username=username, password=password)
-
-
-def main():
- fields = {
- "api_url": {"required": False, "type": "str"},
- "api_username": {"required": False, "type": "str"},
- "api_password": {"required": False, "type": "str", "no_log": True},
- "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"},
- "name": {"required": False, "type": "str"},
- }
-
- module = AnsibleModule(argument_spec=fields,
- mutually_exclusive=[['ids', 'name']],
- supports_check_mode=True)
-
- if not HAS_PYONE:
- module.fail_json(msg='This module requires pyone to work!')
-
- auth = get_connection_info(module)
- params = module.params
- ids = params.get('ids')
- name = params.get('name')
- client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
-
- if ids:
- images = get_images_by_ids(module, client, ids)
- elif name:
- images = get_images_by_name(module, client, name)
- else:
- images = get_all_images(client).IMAGE
-
- result = {
- 'images': [get_image_info(image) for image in images],
- }
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
deleted file mode 100644
index 68f8398f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_service.py
+++ /dev/null
@@ -1,768 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-(c) 2017, Milan Ilic
-
-This file is part of Ansible
-
-Ansible is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_service
-short_description: Deploy and manage OpenNebula services
-description:
- - Manage OpenNebula services
-options:
- api_url:
- description:
- - URL of the OpenNebula OneFlow API server.
- - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
- - If not set then the value of the ONEFLOW_URL environment variable is used.
- type: str
- api_username:
- description:
- - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
- type: str
- api_password:
- description:
- - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
- type: str
- template_name:
- description:
- - Name of service template to use to create a new instance of a service
- type: str
- template_id:
- description:
- - ID of a service template to use to create a new instance of a service
- type: int
- service_id:
- description:
- - ID of a service instance that you would like to manage
- type: int
- service_name:
- description:
- - Name of a service instance that you would like to manage
- type: str
- unique:
- description:
- - Setting C(unique=yes) will make sure that there is only one service instance running with a name set with C(service_name) when
- - instantiating a service from a template specified with C(template_id)/C(template_name). Check examples below.
- type: bool
- default: no
- state:
- description:
- - C(present) - instantiate a service from a template specified with C(template_id)/C(template_name).
- - C(absent) - terminate an instance of a service specified with C(service_id)/C(service_name).
- choices: ["present", "absent"]
- default: present
- type: str
- mode:
- description:
- - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
- type: str
- owner_id:
- description:
- - ID of the user which will be set as the owner of the service
- type: int
- group_id:
- description:
- - ID of the group which will be set as the group of the service
- type: int
- wait:
- description:
- - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING
- type: bool
- default: no
- wait_timeout:
- description:
- - How long before wait gives up, in seconds
- default: 300
- type: int
- custom_attrs:
- description:
- - Dictionary of key/value custom attributes which will be used when instantiating a new service.
- default: {}
- type: dict
- role:
- description:
- - Name of the role whose cardinality should be changed
- type: str
- cardinality:
- description:
- - Number of VMs for the specified role
- type: int
- force:
- description:
- - Force the new cardinality even if it is outside the limits
- type: bool
- default: no
-author:
- - "Milan Ilic (@ilicmilan)"
-'''
-
-EXAMPLES = '''
-- name: Instantiate a new service
- community.general.one_service:
- template_id: 90
- register: result
-
-- name: Print service properties
- ansible.builtin.debug:
-    var: result
-
-- name: Instantiate a new service with specified service_name, service group and mode
- community.general.one_service:
- template_name: 'app1_template'
- service_name: 'app1'
- group_id: 1
- mode: '660'
-
-- name: Instantiate a new service with template_id and pass custom_attrs dict
- community.general.one_service:
- template_id: 90
- custom_attrs:
- public_network_id: 21
- private_network_id: 26
-
-- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
- community.general.one_service:
- template_id: 53
- service_name: 'foo'
- unique: yes
-
-- name: Delete a service by ID
- community.general.one_service:
- service_id: 153
- state: absent
-
-- name: Get service info
- community.general.one_service:
- service_id: 153
- register: service_info
-
-- name: Change service owner, group and mode
- community.general.one_service:
- service_name: 'app2'
- owner_id: 34
- group_id: 113
- mode: '600'
-
-- name: Instantiate service and wait for it to become RUNNING
- community.general.one_service:
- template_id: 43
- service_name: 'foo1'
-
-- name: Wait service to become RUNNING
- community.general.one_service:
- service_id: 112
- wait: yes
-
-- name: Change role cardinality
- community.general.one_service:
- service_id: 153
- role: bar
- cardinality: 5
-
-- name: Change role cardinality and wait for it to be applied
- community.general.one_service:
- service_id: 112
- role: foo
- cardinality: 7
- wait: yes
-'''
-
-RETURN = '''
-service_id:
- description: service id
- type: int
- returned: success
- sample: 153
-service_name:
- description: service name
- type: str
- returned: success
- sample: app1
-group_id:
- description: service's group id
- type: int
- returned: success
- sample: 1
-group_name:
- description: service's group name
- type: str
- returned: success
- sample: one-users
-owner_id:
- description: service's owner id
- type: int
- returned: success
- sample: 143
-owner_name:
- description: service's owner name
- type: str
- returned: success
- sample: ansible-test
-state:
- description: state of service instance
- type: str
- returned: success
- sample: RUNNING
-mode:
- description: service's mode
- type: int
- returned: success
- sample: 660
-roles:
-  description: list of dictionaries of roles, each role is described by name, cardinality, state and node ids
- type: list
- returned: success
- sample: '[{"cardinality": 1,"name": "foo","state": "RUNNING","ids": [ 123, 456 ]},
- {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}]'
-'''
-
-import os
-import sys
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.urls import open_url
-
-STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
- "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
-
-
-def get_all_templates(module, auth):
- try:
- all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- return module.from_json(all_templates.read())
-
-
-def get_template(module, auth, pred):
- all_templates_dict = get_all_templates(module, auth)
-
- found = 0
- found_template = None
- template_name = ''
-
- if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
- for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
- if pred(template):
- found = found + 1
- found_template = template
- template_name = template["NAME"]
-
- if found <= 0:
- return None
- elif found > 1:
- module.fail_json(msg="There is no template with unique name: " + template_name)
- else:
- return found_template
-
-
-def get_all_services(module, auth):
- try:
- response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- return module.from_json(response.read())
-
-
-def get_service(module, auth, pred):
- all_services_dict = get_all_services(module, auth)
-
- found = 0
- found_service = None
- service_name = ''
-
- if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
- for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
- if pred(service):
- found = found + 1
- found_service = service
- service_name = service["NAME"]
-
- # fail if there are more services with same name
- if found > 1:
- module.fail_json(msg="There are multiple services with a name: '" +
- service_name + "'. You have to use a unique service name or use 'service_id' instead.")
- elif found <= 0:
- return None
- else:
- return found_service
-
-
-def get_service_by_id(module, auth, service_id):
- return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
-
-
-def get_service_by_name(module, auth, service_name):
- return get_service(module, auth, lambda service: (service["NAME"] == service_name))
-
-
-def get_service_info(module, auth, service):
-
- result = {
- "service_id": int(service["ID"]),
- "service_name": service["NAME"],
- "group_id": int(service["GID"]),
- "group_name": service["GNAME"],
- "owner_id": int(service["UID"]),
- "owner_name": service["UNAME"],
- "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
- }
-
- roles_status = service["TEMPLATE"]["BODY"]["roles"]
- roles = []
- for role in roles_status:
- nodes_ids = []
- if "nodes" in role:
- for node in role["nodes"]:
- nodes_ids.append(node["deploy_id"])
- roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
-
- result["roles"] = roles
- result["mode"] = int(parse_service_permissions(service))
-
- return result
-
-
-def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
- # make sure that the values in custom_attrs dict are strings
- custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
-
- data = {
- "action": {
- "perform": "instantiate",
- "params": {
- "merge_template": {
- "custom_attrs_values": custom_attrs_with_str,
- "name": service_name
- }
- }
- }
- }
-
- try:
- response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
- data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
- except Exception as e:
- module.fail_json(msg=str(e))
-
- service_result = module.from_json(response.read())["DOCUMENT"]
-
- return service_result
-
-
-def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
- import time
- start_time = time.time()
-
- while (time.time() - start_time) < wait_timeout:
- try:
- status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
- force_basic_auth=True, url_username=auth.user, url_password=auth.password)
- except Exception as e:
- module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
-
- status_result = module.from_json(status_result.read())
- service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
-
- if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
- return status_result["DOCUMENT"]
- elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
- log_message = ''
- for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
- if log_info["severity"] == "E":
- log_message = log_message + log_info["message"]
- break
-
- module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
-
- time.sleep(1)
-
- module.fail_json(msg="Wait timeout has expired")
-
-
-def change_service_permissions(module, auth, service_id, permissions):
-
- data = {
- "action": {
- "perform": "chmod",
- "params": {"octet": permissions}
- }
- }
-
- try:
- status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
- url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def change_service_owner(module, auth, service_id, owner_id):
- data = {
- "action": {
- "perform": "chown",
- "params": {"owner_id": owner_id}
- }
- }
-
- try:
- status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
- url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def change_service_group(module, auth, service_id, group_id):
-
- data = {
- "action": {
- "perform": "chgrp",
- "params": {"group_id": group_id}
- }
- }
-
- try:
- status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
- url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-def change_role_cardinality(module, auth, service_id, role, cardinality, force):
-
- data = {
- "cardinality": cardinality,
- "force": force
- }
-
- try:
- status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
- force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
- except Exception as e:
- module.fail_json(msg=str(e))
-
- if status_result.getcode() != 204:
- module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
-
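-# For reference, the call above PUTs a JSON body such as
-# {"cardinality": 5, "force": false} to /service/<id>/role/<name> and treats
-# any response other than HTTP 204 (No Content) as a failure.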
-
-def check_change_service_owner(module, service, owner_id):
- old_owner_id = int(service["UID"])
-
- return old_owner_id != owner_id
-
-
-def check_change_service_group(module, service, group_id):
- old_group_id = int(service["GID"])
-
- return old_group_id != group_id
-
-
-def parse_service_permissions(service):
- perm_dict = service["PERMISSIONS"]
- '''
- This is the structure of the 'PERMISSIONS' dictionary:
-
- "PERMISSIONS": {
- "OWNER_U": "1",
- "OWNER_M": "1",
- "OWNER_A": "0",
- "GROUP_U": "0",
- "GROUP_M": "0",
- "GROUP_A": "0",
- "OTHER_U": "0",
- "OTHER_M": "0",
- "OTHER_A": "0"
- }
- '''
-
- owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
- group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
- other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
-
- permissions = str(owner_octal) + str(group_octal) + str(other_octal)
-
- return permissions
-
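-# Worked example for the octal conversion above: with the sample PERMISSIONS
-# shown in the docstring (OWNER_U=1, OWNER_M=1, everything else 0) the owner
-# digit is 1*4 + 1*2 + 0 = 6 and the group/other digits are 0, giving '600'.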
-
-def check_change_service_permissions(module, service, permissions):
- old_permissions = parse_service_permissions(service)
-
- return old_permissions != permissions
-
-
-def check_change_role_cardinality(module, service, role_name, cardinality):
- roles_list = service["TEMPLATE"]["BODY"]["roles"]
-
- for role in roles_list:
- if role["name"] == role_name:
- return int(role["cardinality"]) != cardinality
-
- module.fail_json(msg="There is no role with name: " + role_name)
-
-
-def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
- if not service_name:
- service_name = ''
- changed = False
- service = None
-
- if unique:
- service = get_service_by_name(module, auth, service_name)
-
- if not service:
- if not module.check_mode:
- service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
- changed = True
-
-    # if check_mode is true and there would be changes, the service does not exist yet and we cannot fetch it
- if module.check_mode and changed:
- return {"changed": True}
-
- result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
- wait_timeout=wait_timeout, permissions=permissions, service=service)
-
- if result["changed"]:
- changed = True
-
- result["changed"] = changed
-
- return result
-
-
-def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
- role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
-
- changed = False
-
- if not service:
- service = get_service_by_id(module, auth, service_id)
- else:
- service_id = service["ID"]
-
- if not service:
- module.fail_json(msg="There is no service with id: " + str(service_id))
-
- if owner_id:
- if check_change_service_owner(module, service, owner_id):
- if not module.check_mode:
- change_service_owner(module, auth, service_id, owner_id)
- changed = True
- if group_id:
- if check_change_service_group(module, service, group_id):
- if not module.check_mode:
- change_service_group(module, auth, service_id, group_id)
- changed = True
- if permissions:
- if check_change_service_permissions(module, service, permissions):
- if not module.check_mode:
- change_service_permissions(module, auth, service_id, permissions)
- changed = True
-
- if role:
- if check_change_role_cardinality(module, service, role, cardinality):
- if not module.check_mode:
- change_role_cardinality(module, auth, service_id, role, cardinality, force)
- changed = True
-
- if wait and not module.check_mode:
- service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
-
- # if something has changed, fetch service info again
- if changed:
- service = get_service_by_id(module, auth, service_id)
-
- service_info = get_service_info(module, auth, service)
- service_info["changed"] = changed
-
- return service_info
-
-
-def delete_service(module, auth, service_id):
- service = get_service_by_id(module, auth, service_id)
- if not service:
- return {"changed": False}
-
- service_info = get_service_info(module, auth, service)
-
- service_info["changed"] = True
-
- if module.check_mode:
- return service_info
-
- try:
- result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
- except Exception as e:
- module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
-
- return service_info
-
-
-def get_template_by_name(module, auth, template_name):
- return get_template(module, auth, lambda template: (template["NAME"] == template_name))
-
-
-def get_template_by_id(module, auth, template_id):
- return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
-
-
-def get_template_id(module, auth, requested_id, requested_name):
- template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
-
- if template:
- return template["ID"]
-
- return None
-
-
-def get_service_id_by_name(module, auth, service_name):
- service = get_service_by_name(module, auth, service_name)
-
- if service:
- return service["ID"]
-
- return None
-
-
-def get_connection_info(module):
-
- url = module.params.get('api_url')
- username = module.params.get('api_username')
- password = module.params.get('api_password')
-
- if not url:
- url = os.environ.get('ONEFLOW_URL')
-
- if not username:
- username = os.environ.get('ONEFLOW_USERNAME')
-
- if not password:
- password = os.environ.get('ONEFLOW_PASSWORD')
-
- if not(url and username and password):
- module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
- from collections import namedtuple
-
- auth_params = namedtuple('auth', ('url', 'user', 'password'))
-
- return auth_params(url=url, user=username, password=password)
-
-
-def main():
- fields = {
- "api_url": {"required": False, "type": "str"},
- "api_username": {"required": False, "type": "str"},
- "api_password": {"required": False, "type": "str", "no_log": True},
- "service_name": {"required": False, "type": "str"},
- "service_id": {"required": False, "type": "int"},
- "template_name": {"required": False, "type": "str"},
- "template_id": {"required": False, "type": "int"},
- "state": {
- "default": "present",
- "choices": ['present', 'absent'],
- "type": "str"
- },
- "mode": {"required": False, "type": "str"},
- "owner_id": {"required": False, "type": "int"},
- "group_id": {"required": False, "type": "int"},
- "unique": {"default": False, "type": "bool"},
- "wait": {"default": False, "type": "bool"},
- "wait_timeout": {"default": 300, "type": "int"},
- "custom_attrs": {"default": {}, "type": "dict"},
- "role": {"required": False, "type": "str"},
- "cardinality": {"required": False, "type": "int"},
- "force": {"default": False, "type": "bool"}
- }
-
- module = AnsibleModule(argument_spec=fields,
- mutually_exclusive=[
- ['template_id', 'template_name', 'service_id'],
- ['service_id', 'service_name'],
- ['template_id', 'template_name', 'role'],
- ['template_id', 'template_name', 'cardinality'],
- ['service_id', 'custom_attrs']
- ],
- required_together=[['role', 'cardinality']],
- supports_check_mode=True)
-
- auth = get_connection_info(module)
- params = module.params
- service_name = params.get('service_name')
- service_id = params.get('service_id')
-
- requested_template_id = params.get('template_id')
- requested_template_name = params.get('template_name')
- state = params.get('state')
- permissions = params.get('mode')
- owner_id = params.get('owner_id')
- group_id = params.get('group_id')
- unique = params.get('unique')
- wait = params.get('wait')
- wait_timeout = params.get('wait_timeout')
- custom_attrs = params.get('custom_attrs')
- role = params.get('role')
- cardinality = params.get('cardinality')
- force = params.get('force')
-
- template_id = None
-
- if requested_template_id or requested_template_name:
- template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
- if not template_id:
- if requested_template_id:
- module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
- elif requested_template_name:
- module.fail_json(msg="There is no template with name: " + requested_template_name)
-
- if unique and not service_name:
- module.fail_json(msg="You cannot use unique without passing service_name!")
-
- if template_id and state == 'absent':
- module.fail_json(msg="State absent is not valid for template")
-
- if template_id and state == 'present': # Instantiate a service
- result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
- group_id, permissions, custom_attrs, unique, wait, wait_timeout)
- else:
- if not (service_id or service_name):
- module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
- if custom_attrs:
- module.fail_json(msg="You can only set custom_attrs when instantiate service!")
-
- if not service_id:
- service_id = get_service_id_by_name(module, auth, service_name)
- # The task should fail when we want to manage a non-existent service identified by its name
- if not service_id and state == 'present':
- module.fail_json(msg="There is no service with name: " + service_name)
-
- if state == 'absent':
- result = delete_service(module, auth, service_id)
- else:
- result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py
deleted file mode 100644
index b1d2c69c..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_template.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2021, Georg Gadinger
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: one_template
-
-short_description: Manages OpenNebula templates
-
-version_added: 2.4.0
-
-requirements:
- - pyone
-
-description:
- - "Manages OpenNebula templates."
-
-options:
- id:
- description:
- - The I(id) of the template you would like to manage. If not set, then a
- - new template will be created with the given I(name).
- type: int
- name:
- description:
- - The I(name) of the template you would like to manage. If a template with
- - the given name does not exist it will be created, otherwise it will be
- - managed by this module.
- type: str
- template:
- description:
- - A string containing the template contents.
- type: str
- state:
- description:
- - C(present) - state that is used to manage the template.
- - C(absent) - delete the template.
- choices: ["present", "absent"]
- default: present
- type: str
-
-notes:
- - Supports C(check_mode). Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change.
-
-extends_documentation_fragment:
- - community.general.opennebula
-
-author:
- - "Georg Gadinger (@nilsding)"
-'''
-
-EXAMPLES = '''
-- name: Fetch the TEMPLATE by id
- community.general.one_template:
- id: 6459
- register: result
-
-- name: Print the TEMPLATE properties
- ansible.builtin.debug:
- var: result
-
-- name: Fetch the TEMPLATE by name
- community.general.one_template:
- name: tf-prd-users-workerredis-p6379a
- register: result
-
-- name: Create a new or update an existing TEMPLATE
- community.general.one_template:
- name: generic-opensuse
- template: |
- CONTEXT = [
- HOSTNAME = "generic-opensuse"
- ]
- CPU = "1"
- CUSTOM_ATTRIBUTE = ""
- DISK = [
- CACHE = "writeback",
- DEV_PREFIX = "sd",
- DISCARD = "unmap",
- IMAGE = "opensuse-leap-15.2",
- IMAGE_UNAME = "oneadmin",
- IO = "threads",
- SIZE = "" ]
- MEMORY = "2048"
- NIC = [
- MODEL = "virtio",
- NETWORK = "testnet",
- NETWORK_UNAME = "oneadmin" ]
- OS = [
- ARCH = "x86_64",
- BOOT = "disk0" ]
- SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\""
- VCPU = "2"
-
-- name: Delete the TEMPLATE by id
- community.general.one_template:
- id: 6459
- state: absent
-'''
-
-RETURN = '''
-id:
- description: template id
- type: int
- returned: when I(state=present)
- sample: 153
-name:
- description: template name
- type: str
- returned: when I(state=present)
- sample: app1
-template:
- description: the parsed template
- type: dict
- returned: when I(state=present)
-group_id:
- description: template's group id
- type: int
- returned: when I(state=present)
- sample: 1
-group_name:
- description: template's group name
- type: str
- returned: when I(state=present)
- sample: one-users
-owner_id:
- description: template's owner id
- type: int
- returned: when I(state=present)
- sample: 143
-owner_name:
- description: template's owner name
- type: str
- returned: when I(state=present)
- sample: ansible-test
-'''
-
-
-from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
-
-
-class TemplateModule(OpenNebulaModule):
- def __init__(self):
- argument_spec = dict(
- id=dict(type='int', required=False),
- name=dict(type='str', required=False),
- state=dict(type='str', choices=['present', 'absent'], default='present'),
- template=dict(type='str', required=False),
- )
-
- mutually_exclusive = [
- ['id', 'name']
- ]
-
- required_one_of = [('id', 'name')]
-
- required_if = [
- ['state', 'present', ['template']]
- ]
-
- OpenNebulaModule.__init__(self,
- argument_spec,
- supports_check_mode=True,
- mutually_exclusive=mutually_exclusive,
- required_one_of=required_one_of,
- required_if=required_if)
-
- def run(self, one, module, result):
- params = module.params
- id = params.get('id')
- name = params.get('name')
- desired_state = params.get('state')
- template_data = params.get('template')
-
- self.result = {}
-
- template = self.get_template_instance(id, name)
- needs_creation = False
- if not template and desired_state != 'absent':
- if id:
- module.fail_json(msg="There is no template with id=" + str(id))
- else:
- needs_creation = True
-
- if desired_state == 'absent':
- self.result = self.delete_template(template)
- else:
- if needs_creation:
- self.result = self.create_template(name, template_data)
- else:
- self.result = self.update_template(template, template_data)
-
- self.exit()
-
- def get_template(self, predicate):
- # -3 means "Resources belonging to the user"
- # the other two parameters are used for pagination, -1 for both essentially means "return all"
- pool = self.one.templatepool.info(-3, -1, -1)
-
- for template in pool.VMTEMPLATE:
- if predicate(template):
- return template
-
- return None
-
- def get_template_by_id(self, template_id):
- return self.get_template(lambda template: (template.ID == template_id))
-
- def get_template_by_name(self, name):
- return self.get_template(lambda template: (template.NAME == name))
-
- def get_template_instance(self, requested_id, requested_name):
- if requested_id:
- return self.get_template_by_id(requested_id)
- else:
- return self.get_template_by_name(requested_name)
-
- def get_template_info(self, template):
- info = {
- 'id': template.ID,
- 'name': template.NAME,
- 'template': template.TEMPLATE,
- 'user_name': template.UNAME,
- 'user_id': template.UID,
- 'group_name': template.GNAME,
- 'group_id': template.GID,
- }
-
- return info
-
- def create_template(self, name, template_data):
- if not self.module.check_mode:
- self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data)
-
- result = self.get_template_info(self.get_template_by_name(name))
- result['changed'] = True
-
- return result
-
- def update_template(self, template, template_data):
- if not self.module.check_mode:
- # 0 = replace the whole template
- self.one.template.update(template.ID, template_data, 0)
-
- result = self.get_template_info(self.get_template_by_id(template.ID))
- if self.module.check_mode:
- # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here.
- result['changed'] = True
- else:
- # if the previous parsed template data is not equal to the updated one, this has changed
- result['changed'] = template.TEMPLATE != result['template']
-
- return result
-
- def delete_template(self, template):
- if not template:
- return {'changed': False}
-
- if not self.module.check_mode:
- self.one.template.delete(template.ID)
-
- return {'changed': True}
-
-
-def main():
- TemplateModule().run_module()
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py b/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
deleted file mode 100644
index 86061f73..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/opennebula/one_vm.py
+++ /dev/null
@@ -1,1635 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-"""
-(c) 2017, Milan Ilic
-(c) 2019, Jan Meerkamp
-
-This file is part of Ansible
-
-Ansible is free software: you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-Ansible is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-DOCUMENTATION = '''
----
-module: one_vm
-short_description: Creates or terminates OpenNebula instances
-description:
- - Manages OpenNebula instances
-requirements:
- - pyone
-options:
- api_url:
- description:
- - URL of the OpenNebula RPC server.
- - It is recommended to use HTTPS so that the username/password are not
- - transferred over the network unencrypted.
- - If not set then the value of the C(ONE_URL) environment variable is used.
- type: str
- api_username:
- description:
- - Name of the user to log in to the OpenNebula RPC server. If not set
- - then the value of the C(ONE_USERNAME) environment variable is used.
- type: str
- api_password:
- description:
- - Password of the user to log in to the OpenNebula RPC server. If not set
- - then the value of the C(ONE_PASSWORD) environment variable is used.
- - If neither I(api_username) nor I(api_password) is set, then it will try to
- - authenticate with the ONE auth file. Default path is "~/.one/one_auth".
- - Set environment variable C(ONE_AUTH) to override this path.
- type: str
- template_name:
- description:
- - Name of VM template to use to create a new instance
- type: str
- template_id:
- description:
- - ID of a VM template to use to create a new instance
- type: int
- vm_start_on_hold:
- description:
- - Set to true to put vm on hold while creating
- default: False
- type: bool
- instance_ids:
- description:
- - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
- aliases: ['ids']
- type: list
- elements: int
- state:
- description:
- - C(present) - create instances from a template specified with C(template_id)/C(template_name).
- - C(running) - run instances
- - C(poweredoff) - power-off instances
- - C(rebooted) - reboot instances
- - C(absent) - terminate instances
- choices: ["present", "absent", "running", "rebooted", "poweredoff"]
- default: present
- type: str
- hard:
- description:
- - Reboot, power-off or terminate instances C(hard)
- default: no
- type: bool
- wait:
- description:
- - Wait for the instance to reach its desired state before returning. Keep
- - in mind if you are waiting for instance to be in running state it
- - doesn't mean that you will be able to SSH on that machine only that
- - boot process have started on that instance, see 'wait_for' example for
- - details.
- default: yes
- type: bool
- wait_timeout:
- description:
- - How long before wait gives up, in seconds
- default: 300
- type: int
- attributes:
- description:
- - A dictionary of key/value attributes to add to new instances, or for
- - setting C(state) of instances with these attributes.
- - Keys are case insensitive and OpenNebula automatically converts them to upper case.
- - Be aware C(NAME) is a special attribute which sets the name of the VM when it's deployed.
- - C(#) character(s) can be appended to the C(NAME) and the module will automatically add
- - indexes to the names of VMs.
- - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
- - When used with C(count_attributes) and C(exact_count) the module will
- - match the base name without the index part.
- default: {}
- type: dict
- labels:
- description:
- - A list of labels to associate with new instances, or for setting
- - C(state) of instances with these labels.
- default: []
- type: list
- elements: str
- count_attributes:
- description:
- - A dictionary of key/value attributes that can only be used with
- - C(exact_count) to determine how many nodes should be deployed based on
- - specific attribute criteria. This can be expressed in
- - multiple ways and is shown in the EXAMPLES section.
- type: dict
- count_labels:
- description:
- - A list of labels that can only be used with C(exact_count) to determine
- - how many nodes should be deployed based on specific label criteria.
- - This can be expressed in multiple ways and is shown in the EXAMPLES
- - section.
- type: list
- elements: str
- count:
- description:
- - Number of instances to launch
- default: 1
- type: int
- exact_count:
- description:
- - Indicates how many instances matching the C(count_attributes) and
- - C(count_labels) parameters should be deployed. Instances are either
- - created or terminated based on this value.
- - NOTE':' Instances with the lowest IDs will be terminated first.
- type: int
- mode:
- description:
- - Set permission mode of the instance in octal format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
- type: str
- owner_id:
- description:
- - ID of the user which will be set as the owner of the instance
- type: int
- group_id:
- description:
- - ID of the group which will be set as the group of the instance
- type: int
- memory:
- description:
- - The size of the memory for new instances (in MB, GB, ...)
- type: str
- disk_size:
- description:
- - The size of the disk created for new instances (in MB, GB, TB,...).
- - NOTE':' If the template has multiple disks, the order of the sizes is
- - matched against the order specified in C(template_id)/C(template_name).
- type: list
- elements: str
- cpu:
- description:
- - Percentage of CPU divided by 100 required for the new instance. Half a
- - processor is written as 0.5.
- type: float
- vcpu:
- description:
- - Number of CPUs (cores) the new VM will have.
- type: int
- networks:
- description:
- - A list of dictionaries with network parameters. See examples for more details.
- default: []
- type: list
- elements: dict
- disk_saveas:
- description:
- - Creates an image from a VM disk.
- - It is a dictionary where you have to specify C(name) of the new image.
- - Optionally you can specify C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
- - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed)
- - and the VM has to be in the C(poweredoff) state.
- - Also this operation will fail if an image with specified C(name) already exists.
- type: dict
- persistent:
- description:
- - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
- default: NO
- type: bool
- version_added: '0.2.0'
- datastore_id:
- description:
- - ID of the datastore to use to create a new instance
- version_added: '0.2.0'
- type: int
- datastore_name:
- description:
- - Name of the datastore to use to create a new instance
- version_added: '0.2.0'
- type: str
-author:
- - "Milan Ilic (@ilicmilan)"
- - "Jan Meerkamp (@meerkampdvv)"
-'''
-
-
-EXAMPLES = '''
-- name: Create a new instance
- community.general.one_vm:
- template_id: 90
- register: result
-
-- name: Print VM properties
- ansible.builtin.debug:
- var: result
-
-- name: Deploy a new VM on hold
- community.general.one_vm:
- template_name: 'app1_template'
- vm_start_on_hold: 'True'
-
-- name: Deploy a new VM and set its name to 'foo'
- community.general.one_vm:
- template_name: 'app1_template'
- attributes:
- name: foo
-
-- name: Deploy a new VM and set its group_id and mode
- community.general.one_vm:
- template_id: 90
- group_id: 16
- mode: 660
-
-- name: Deploy a new VM as persistent
- community.general.one_vm:
- template_id: 90
- persistent: yes
-
-- name: Change VM's permissions to 640
- community.general.one_vm:
- instance_ids: 5
- mode: 640
-
-- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
- community.general.one_vm:
- template_id: 15
- disk_size: 35.2 GB
- memory: 4 GB
- vcpu: 4
- count: 2
- networks:
- - NETWORK_ID: 27
- - NETWORK: "default-network"
- NETWORK_UNAME: "app-user"
- SECURITY_GROUPS: "120,124"
- - NETWORK_ID: 27
- SECURITY_GROUPS: "10"
-
-- name: Deploy a new instance which uses a Template with two Disks
- community.general.one_vm:
- template_id: 42
- disk_size:
- - 35.2 GB
- - 50 GB
- memory: 4 GB
- vcpu: 4
- count: 1
- networks:
- - NETWORK_ID: 27
-
-- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
- community.general.one_vm:
- template_id: 53
- attributes:
- name: foo
- bar: bar1
-
-- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
- community.general.one_vm:
- template_id: 53
- attributes:
- foo1: app1
- foo2: app2
- exact_count: 2
- count_attributes:
- foo1: app1
- foo2: app2
-
-- name: Enforce that 4 instances with an attribute 'bar' are deployed
- community.general.one_vm:
- template_id: 53
- attributes:
- name: app
- bar: bar2
- exact_count: 4
- count_attributes:
- bar:
-
-# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
-# Names will be: fooapp-00 and fooapp-01
-- name: Deploy 2 new instances
- community.general.one_vm:
- template_id: 53
- attributes:
- name: fooapp-##
- foo: bar
- labels:
- - app1
- - app2
- count: 2
-
-# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
-# Names will be: fooapp-002 and fooapp-003
-- name: Deploy 2 new instances
- community.general.one_vm:
- template_id: 53
- attributes:
- name: fooapp-###
- app: app1
- count: 2
-
-# Reboot all instances with name in format 'fooapp-#'
-# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
-- name: Reboot all instances with names in a certain format
- community.general.one_vm:
- attributes:
- name: fooapp-#
- state: rebooted
-
-# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
-# The task will delete the oldest instances, so only 'fooapp-003' will remain
-- name: Enforce that only 1 instance with name in a certain format is deployed
- community.general.one_vm:
- template_id: 53
- exact_count: 1
- count_attributes:
- name: fooapp-#
-
-- name: Deploy a new instance with a network
- community.general.one_vm:
- template_id: 53
- networks:
- - NETWORK_ID: 27
- register: vm
-
-- name: Wait for SSH to come up
- ansible.builtin.wait_for_connection:
- delegate_to: '{{ vm.instances[0].networks[0].ip }}'
-
-- name: Terminate VMs by ids
- community.general.one_vm:
- instance_ids:
- - 153
- - 160
- state: absent
-
-- name: Reboot all VMs that have labels 'foo' and 'app1'
- community.general.one_vm:
- labels:
- - foo
- - app1
- state: rebooted
-
-- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
- community.general.one_vm:
- attributes:
- name: foo
- app: bar
- register: results
-
-- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
- community.general.one_vm:
- template_name: app_template
- labels:
- - foo1
- - foo2
- count: 2
-
-- name: Enforce that only 1 instance with label 'foo1' will be running
- community.general.one_vm:
- template_name: app_template
- labels:
- - foo1
- exact_count: 1
- count_labels:
- - foo1
-
-- name: Terminate all instances that have attribute foo
- community.general.one_vm:
- template_id: 53
- exact_count: 0
- count_attributes:
- foo:
-
-- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
- community.general.one_vm:
- instance_ids: 351
- state: poweredoff
- disk_saveas:
- name: foo-image
-
-- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
- community.general.one_vm:
- instance_ids: 351
- disk_saveas:
- name: bar-image
- disk_id: 1
-'''
-
-RETURN = '''
-instances_ids:
- description: a list of instance ids whose state is changed or which are fetched with the C(instance_ids) option.
- type: list
- returned: success
- sample: [ 1234, 1235 ]
-instances:
- description: a list of instance info dictionaries whose state is changed or which are fetched with the C(instance_ids) option.
- type: complex
- returned: success
- contains:
- vm_id:
- description: vm id
- type: int
- sample: 153
- vm_name:
- description: vm name
- type: str
- sample: foo
- template_id:
- description: vm's template id
- type: int
- sample: 153
- group_id:
- description: vm's group id
- type: int
- sample: 1
- group_name:
- description: vm's group name
- type: str
- sample: one-users
- owner_id:
- description: vm's owner id
- type: int
- sample: 143
- owner_name:
- description: vm's owner name
- type: str
- sample: app-user
- mode:
- description: vm's mode
- type: str
- returned: success
- sample: 660
- state:
- description: state of an instance
- type: str
- sample: ACTIVE
- lcm_state:
- description: lcm state of an instance that is only relevant when the state is ACTIVE
- type: str
- sample: RUNNING
- cpu:
- description: Percentage of CPU divided by 100
- type: float
- sample: 0.2
- vcpu:
- description: Number of CPUs (cores)
- type: int
- sample: 2
- memory:
- description: The size of the memory in MB
- type: str
- sample: 4096 MB
- disk_size:
- description: The size of the disk in MB
- type: str
- sample: 20480 MB
- networks:
- description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
- type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
- uptime_h:
- description: Uptime of the instance in hours
- type: int
- sample: 35
- labels:
- description: A list of string labels that are associated with the instance
- type: list
- sample: [
- "foo",
- "spec-label"
- ]
- attributes:
- description: A dictionary of key/values attributes that are associated with the instance
- type: dict
- sample: {
- "HYPERVISOR": "kvm",
- "LOGO": "images/logos/centos.png",
- "TE_GALAXY": "bar",
- "USER_INPUTS": null
- }
-tagged_instances:
- description:
- - A list of instance info based on the specific attributes and/or
- - labels that are specified with C(count_attributes) and C(count_labels)
- - options.
- type: complex
- returned: success
- contains:
- vm_id:
- description: vm id
- type: int
- sample: 153
- vm_name:
- description: vm name
- type: str
- sample: foo
- template_id:
- description: vm's template id
- type: int
- sample: 153
- group_id:
- description: vm's group id
- type: int
- sample: 1
- group_name:
- description: vm's group name
- type: str
- sample: one-users
- owner_id:
- description: vm's user id
- type: int
- sample: 143
- owner_name:
- description: vm's user name
- type: str
- sample: app-user
- mode:
- description: vm's mode
- type: str
- returned: success
- sample: 660
- state:
- description: state of an instance
- type: str
- sample: ACTIVE
- lcm_state:
- description: lcm state of an instance that is only relevant when the state is ACTIVE
- type: str
- sample: RUNNING
- cpu:
- description: Percentage of CPU divided by 100
- type: float
- sample: 0.2
- vcpu:
- description: Number of CPUs (cores)
- type: int
- sample: 2
- memory:
- description: The size of the memory in MB
- type: str
- sample: 4096 MB
- disk_size:
- description: The size of the disk in MB
- type: list
- sample: [
- "20480 MB",
- "10240 MB"
- ]
- networks:
- description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
- type: list
- sample: [
- {
- "ip": "10.120.5.33",
- "mac": "02:00:0a:78:05:21",
- "name": "default-test-private",
- "security_groups": "0,10"
- },
- {
- "ip": "10.120.5.34",
- "mac": "02:00:0a:78:05:22",
- "name": "default-test-private",
- "security_groups": "0"
- }
- ]
- uptime_h:
- description: Uptime of the instance in hours
- type: int
- sample: 35
- labels:
- description: A list of string labels that are associated with the instance
- type: list
- sample: [
- "foo",
- "spec-label"
- ]
- attributes:
- description: A dictionary of key/values attributes that are associated with the instance
- type: dict
- sample: {
- "HYPERVISOR": "kvm",
- "LOGO": "images/logos/centos.png",
- "TE_GALAXY": "bar",
- "USER_INPUTS": null
- }
-'''
-
-try:
- import pyone
- HAS_PYONE = True
-except ImportError:
- HAS_PYONE = False
-
-from ansible.module_utils.basic import AnsibleModule
-import os
-
-
-def get_template(module, client, predicate):
-
- pool = client.templatepool.info(-2, -1, -1, -1)
- # Filter -2 means fetch all templates the user can use
- found = 0
- found_template = None
- template_name = ''
-
- for template in pool.VMTEMPLATE:
- if predicate(template):
- found = found + 1
- found_template = template
- template_name = template.NAME
-
- if found == 0:
- return None
- elif found > 1:
- module.fail_json(msg='There is more than one template with name: ' + template_name)
- return found_template
-
-
-def get_template_by_name(module, client, template_name):
- return get_template(module, client, lambda template: (template.NAME == template_name))
-
-
-def get_template_by_id(module, client, template_id):
- return get_template(module, client, lambda template: (template.ID == template_id))
-
-
-def get_template_id(module, client, requested_id, requested_name):
- template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
- if template:
- return template.ID
- else:
- return None
-
-
-def get_datastore(module, client, predicate):
- pool = client.datastorepool.info()
- found = 0
- found_datastore = None
- datastore_name = ''
-
- for datastore in pool.DATASTORE:
- if predicate(datastore):
- found = found + 1
- found_datastore = datastore
- datastore_name = datastore.NAME
-
- if found == 0:
- return None
- elif found > 1:
- module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
- return found_datastore
-
-
-def get_datastore_by_name(module, client, datastore_name):
- return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
-
-
-def get_datastore_by_id(module, client, datastore_id):
- return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
-
-
-def get_datastore_id(module, client, requested_id, requested_name):
- datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
- if datastore:
- return datastore.ID
- else:
- return None
-
-
-def get_vm_by_id(client, vm_id):
- try:
- vm = client.vm.info(int(vm_id))
- except BaseException:
- return None
- return vm
-
-
-def get_vms_by_ids(module, client, state, ids):
- vms = []
-
- for vm_id in ids:
- vm = get_vm_by_id(client, vm_id)
- if vm is None and state != 'absent':
- module.fail_json(msg='There is no VM with id=' + str(vm_id))
- vms.append(vm)
-
- return vms
-
-
-def get_vm_info(client, vm):
-
- vm = client.vm.info(vm.ID)
-
- networks_info = []
-
- disk_size = []
- if 'DISK' in vm.TEMPLATE:
- if isinstance(vm.TEMPLATE['DISK'], list):
- for disk in vm.TEMPLATE['DISK']:
- disk_size.append(disk['SIZE'] + ' MB')
- else:
- disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
-
- if 'NIC' in vm.TEMPLATE:
- if isinstance(vm.TEMPLATE['NIC'], list):
- for nic in vm.TEMPLATE['NIC']:
- networks_info.append({
- 'ip': nic.get('IP', ''),
- 'mac': nic.get('MAC', ''),
- 'name': nic.get('NETWORK', ''),
- 'security_groups': nic.get('SECURITY_GROUPS', '')
- })
- else:
- networks_info.append({
- 'ip': vm.TEMPLATE['NIC'].get('IP', ''),
- 'mac': vm.TEMPLATE['NIC'].get('MAC', ''),
- 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''),
- 'security_groups':
- vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '')
- })
- import time
-
- current_time = time.localtime()
- vm_start_time = time.localtime(vm.STIME)
-
- vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
- vm_uptime /= (60 * 60)
-
- permissions_str = parse_vm_permissions(client, vm)
-
- # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
- vm_lcm_state = None
- if vm.STATE == VM_STATES.index('ACTIVE'):
- vm_lcm_state = LCM_STATES[vm.LCM_STATE]
-
- vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
-
- info = {
- 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
- 'vm_id': vm.ID,
- 'vm_name': vm.NAME,
- 'state': VM_STATES[vm.STATE],
- 'lcm_state': vm_lcm_state,
- 'owner_name': vm.UNAME,
- 'owner_id': vm.UID,
- 'networks': networks_info,
- 'disk_size': disk_size,
- 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
- 'vcpu': vm.TEMPLATE['VCPU'],
- 'cpu': vm.TEMPLATE['CPU'],
- 'group_name': vm.GNAME,
- 'group_id': vm.GID,
- 'uptime_h': int(vm_uptime),
- 'attributes': vm_attributes,
- 'mode': permissions_str,
- 'labels': vm_labels
- }
-
- return info
-
-
-def parse_vm_permissions(client, vm):
- vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
-
- owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
- group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
- other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
-
- permissions = str(owner_octal) + str(group_octal) + str(other_octal)
-
- return permissions
-
-
-def set_vm_permissions(module, client, vms, permissions):
- changed = False
-
- for vm in vms:
- vm = client.vm.info(vm.ID)
- old_permissions = parse_vm_permissions(client, vm)
- changed = changed or old_permissions != permissions
-
- if not module.check_mode and old_permissions != permissions:
- permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
- mode_bits = [int(d) for d in permissions_str]
- try:
- client.vm.chmod(
- vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
- except pyone.OneAuthorizationException:
- module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
-
- return changed
-
-
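-# For example, set_vm_permissions converts the octal mode string into the nine
-# USE/MANAGE/ADMIN bits that client.vm.chmod expects:
-#
-#   >>> bin(int('640', base=8))[2:]
-#   '110100000'
-#
-# Note this assumes the owner digit is at least 4; a mode such as '040' would
-# yield fewer than nine binary digits.
-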
-def set_vm_ownership(module, client, vms, owner_id, group_id):
- changed = False
-
- for vm in vms:
- vm = client.vm.info(vm.ID)
- if owner_id is None:
- owner_id = vm.UID
- if group_id is None:
- group_id = vm.GID
-
- changed = changed or owner_id != vm.UID or group_id != vm.GID
-
- if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
- try:
- client.vm.chown(vm.ID, owner_id, group_id)
- except pyone.OneAuthorizationException:
- module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
-
- return changed
-
-
-def get_size_in_MB(module, size_str):
-
- SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
-
- s = size_str
- init = size_str
- num = ""
- while s and s[0:1].isdigit() or s[0:1] == '.':
- num += s[0]
- s = s[1:]
- num = float(num)
- symbol = s.strip()
-
- if symbol not in SYMBOLS:
- module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
-
- prefix = {'B': 1}
-
- for i, s in enumerate(SYMBOLS[1:]):
- prefix[s] = 1 << (i + 1) * 10
-
- size_in_bytes = int(num * prefix[symbol])
- size_in_MB = size_in_bytes / (1024 * 1024)
-
- return size_in_MB
-
-
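-# For example, get_size_in_MB parses a human-readable size string using binary
-# prefixes (1 GB = 1024 MB):
-#
-#   >>> get_size_in_MB(module, '4 GB')
-#   4096.0
-#   >>> get_size_in_MB(module, '512 MB')
-#   512.0
-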
-def create_disk_str(module, client, template_id, disk_size_list):
-
- if not disk_size_list:
- return ''
-
- template = client.template.info(template_id)
- if isinstance(template.TEMPLATE['DISK'], list):
- # check if the number of disks is correct
- if len(template.TEMPLATE['DISK']) != len(disk_size_list):
- module.fail_json(msg='This template has ' + str(len(template.TEMPLATE['DISK'])) + ' disks but you defined ' + str(len(disk_size_list)))
- result = ''
- index = 0
- for DISKS in template.TEMPLATE['DISK']:
- disk = {}
- diskresult = ''
- # Get all info about the existing disk, e.g. IMAGE_ID, ...
- for key, value in DISKS.items():
- disk[key] = value
- # copy disk attributes if it is not the size attribute
- diskresult += 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
- # Set the Disk Size
- diskresult += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[index]))) + ']\n'
- result += diskresult
- index += 1
- else:
- if len(disk_size_list) > 1:
- module.fail_json(msg='This template has one disk but you defined ' + str(len(disk_size_list)))
- disk = {}
- # Get all info about the existing disk, e.g. IMAGE_ID, ...
- for key, value in template.TEMPLATE['DISK'].items():
- disk[key] = value
- # copy disk attributes if it is not the size attribute
- result = 'DISK = [' + ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in disk.items() if key != 'SIZE')
- # Set the Disk Size
- result += ', SIZE=' + str(int(get_size_in_MB(module, disk_size_list[0]))) + ']\n'
-
- return result
-
-
-def create_attributes_str(attributes_dict, labels_list):
-
- attributes_str = ''
-
- if labels_list:
- attributes_str += 'LABELS="' + ','.join('{label}'.format(label=label) for label in labels_list) + '"\n'
- if attributes_dict:
- attributes_str += '\n'.join('{key}="{val}"'.format(key=key.upper(), val=val) for key, val in attributes_dict.items()) + '\n'
-
- return attributes_str
-
-
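-# For example, create_attributes_str renders labels and attributes in the
-# OpenNebula template syntax (assuming insertion-ordered dicts, Python 3.7+):
-#
-#   >>> create_attributes_str({'name': 'foo', 'foo': 'bar'}, ['app1'])
-#   'LABELS="app1"\nNAME="foo"\nFOO="bar"\n'
-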
-def create_nics_str(network_attrs_list):
- nics_str = ''
-
- for network in network_attrs_list:
- # Packing key-value dict in string with format key="value", key="value"
- network_str = ','.join('{key}="{val}"'.format(key=key, val=val) for key, val in network.items())
- nics_str = nics_str + 'NIC = [' + network_str + ']\n'
-
- return nics_str
-
-
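-# For example, create_nics_str packs each network dict into one NIC section:
-#
-#   >>> create_nics_str([{'NETWORK_ID': 27, 'SECURITY_GROUPS': '10'}])
-#   'NIC = [NETWORK_ID="27",SECURITY_GROUPS="10"]\n'
-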
-def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent):
-
- if attributes_dict:
- vm_name = attributes_dict.get('NAME', '')
-
- disk_str = create_disk_str(module, client, template_id, disk_size)
- vm_extra_template_str = create_attributes_str(attributes_dict, labels_list) + create_nics_str(network_attrs_list) + disk_str
- try:
- vm_id = client.template.instantiate(template_id, vm_name, vm_start_on_hold, vm_extra_template_str, vm_persistent)
- except pyone.OneException as e:
- module.fail_json(msg=str(e))
- vm = get_vm_by_id(client, vm_id)
-
- return get_vm_info(client, vm)
-
-
-def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
- counter = 0
- cnt_str = str(counter).zfill(num_sign_cnt)
-
- while cnt_str in vm_filled_indexes_list:
- counter = counter + 1
- cnt_str = str(counter).zfill(num_sign_cnt)
-
- return cnt_str
-
-
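-# For example, generate_next_index returns the first free zero-padded index:
-#
-#   >>> generate_next_index(['000', '001', '003'], 3)
-#   '002'
-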
-def get_vm_labels_and_attributes_dict(client, vm_id):
- vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
-
- attrs_dict = {}
- labels_list = []
-
- for key, value in vm_USER_TEMPLATE.items():
- if key != 'LABELS':
- attrs_dict[key] = value
- else:
- if key is not None:
- labels_list = value.split(',')
-
- return labels_list, attrs_dict
-
-
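-# For example, a USER_TEMPLATE of {'LABELS': 'foo,app1', 'BAR': 'bar1'} would
-# yield:
-#
-#   >>> get_vm_labels_and_attributes_dict(client, vm_id)
-#   (['foo', 'app1'], {'BAR': 'bar1'})
-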
-def get_all_vms_by_attributes(client, attributes_dict, labels_list):
- pool = client.vmpool.info(-2, -1, -1, -1).VM
- vm_list = []
- name = ''
- if attributes_dict:
- name = attributes_dict.pop('NAME', '')
-
- if name != '':
- base_name = name[:len(name) - name.count('#')]
- # Check whether the name has an indexed format
- with_hash = name.endswith('#')
-
- for vm in pool:
- if vm.NAME.startswith(base_name):
- if with_hash and vm.NAME[len(base_name):].isdigit():
- # If the name has an indexed format and only digits follow base_name, it is matched
- vm_list.append(vm)
- elif not with_hash and vm.NAME == name:
- # If the name is not indexed, it has to match exactly
- vm_list.append(vm)
- pool = vm_list
-
- import copy
-
- vm_list = copy.copy(pool)
-
- for vm in pool:
- remove_list = []
- vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
-
- if attributes_dict and len(attributes_dict) > 0:
- for key, val in attributes_dict.items():
- if key in vm_attributes_dict:
- if val and vm_attributes_dict[key] != val:
- remove_list.append(vm)
- break
- else:
- remove_list.append(vm)
- break
- vm_list = list(set(vm_list).difference(set(remove_list)))
-
- remove_list = []
- if labels_list and len(labels_list) > 0:
- for label in labels_list:
- if label not in vm_labels_list:
- remove_list.append(vm)
- break
- vm_list = list(set(vm_list).difference(set(remove_list)))
-
- return vm_list
-
-
-def create_count_of_vms(
- module, client, template_id, count, attributes_dict, labels_list, disk_size, network_attrs_list, wait, wait_timeout, vm_start_on_hold, vm_persistent):
- new_vms_list = []
-
- vm_name = ''
- if attributes_dict:
- vm_name = attributes_dict.get('NAME', '')
-
- if module.check_mode:
- return True, [], []
-
- # Create list of used indexes
- vm_filled_indexes_list = None
- num_sign_cnt = vm_name.count('#')
- if vm_name != '' and num_sign_cnt > 0:
- vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
- base_name = vm_name[:len(vm_name) - num_sign_cnt]
- vm_name = base_name
- # Make list which contains used indexes in format ['000', '001',...]
- vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
-
- while count > 0:
- new_vm_name = vm_name
- # Create indexed name
- if vm_filled_indexes_list is not None:
- next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
- vm_filled_indexes_list.append(next_index)
- new_vm_name += next_index
- # Update NAME value in the attributes in case there is index
- attributes_dict['NAME'] = new_vm_name
- new_vm_dict = create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent)
- new_vm_id = new_vm_dict.get('vm_id')
- new_vm = get_vm_by_id(client, new_vm_id)
- new_vms_list.append(new_vm)
- count -= 1
-
- if vm_start_on_hold:
- if wait:
- for vm in new_vms_list:
- wait_for_hold(module, client, vm, wait_timeout)
- else:
- if wait:
- for vm in new_vms_list:
- wait_for_running(module, client, vm, wait_timeout)
-
- return True, new_vms_list, []
-
-
-def create_exact_count_of_vms(module, client, template_id, exact_count, attributes_dict, count_attributes_dict,
- labels_list, count_labels_list, disk_size, network_attrs_list, hard, wait, wait_timeout, vm_start_on_hold, vm_persistent):
-
- vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
-
- vm_count_diff = exact_count - len(vm_list)
- changed = vm_count_diff != 0
-
- new_vms_list = []
- instances_list = []
- tagged_instances_list = vm_list
-
- if module.check_mode:
- return changed, instances_list, tagged_instances_list
-
- if vm_count_diff > 0:
- # Add more VMs
- changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
- labels_list, disk_size, network_attrs_list, wait, wait_timeout,
- vm_start_on_hold, vm_persistent)
-
- tagged_instances_list += instances_list
- elif vm_count_diff < 0:
- # Delete surplus VMs
- old_vms_list = []
-
- while vm_count_diff < 0:
- old_vm = vm_list.pop(0)
- old_vms_list.append(old_vm)
- terminate_vm(module, client, old_vm, hard)
- vm_count_diff += 1
-
- if wait:
- for vm in old_vms_list:
- wait_for_done(module, client, vm, wait_timeout)
-
- instances_list = old_vms_list
- # store only the remaining instances
- old_vms_set = set(old_vms_list)
- tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
-
- return changed, instances_list, tagged_instances_list
-
-
-VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
-LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
- 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
- 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
- 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
- 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
-
-
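-# OpenNebula reports VM states as numeric codes; the two lists above map a code
-# to its name by position. For example, a running VM reports STATE 3 and
-# LCM_STATE 3:
-#
-#   >>> VM_STATES.index('ACTIVE'), LCM_STATES.index('RUNNING')
-#   (3, 3)
-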
-def wait_for_state(module, client, vm, wait_timeout, state_predicate):
- import time
- start_time = time.time()
-
- while (time.time() - start_time) < wait_timeout:
- vm = client.vm.info(vm.ID)
- state = vm.STATE
- lcm_state = vm.LCM_STATE
-
- if state_predicate(state, lcm_state):
- return vm
- elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
- VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
- module.fail_json(msg='Action was unsuccessful. VM state: ' + VM_STATES[state])
-
- time.sleep(1)
-
- module.fail_json(msg="Wait timeout has expired!")
-
-
-def wait_for_running(module, client, vm, wait_timeout):
- return wait_for_state(module, client, vm, wait_timeout, lambda state,
- lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
-
-
-def wait_for_done(module, client, vm, wait_timeout):
- return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
-
-
-def wait_for_hold(module, client, vm, wait_timeout):
- return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
-
-
-def wait_for_poweroff(module, client, vm, wait_timeout):
- return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
-
-
-def terminate_vm(module, client, vm, hard=False):
- changed = False
-
- if not vm:
- return changed
-
- changed = True
-
- if not module.check_mode:
- if hard:
- client.vm.action('terminate-hard', vm.ID)
- else:
- client.vm.action('terminate', vm.ID)
-
- return changed
-
-
-def terminate_vms(module, client, vms, hard):
- changed = False
-
- for vm in vms:
- changed = terminate_vm(module, client, vm, hard) or changed
-
- return changed
-
-
-def poweroff_vm(module, client, vm, hard):
- vm = client.vm.info(vm.ID)
- changed = False
-
- lcm_state = vm.LCM_STATE
- state = vm.STATE
-
- if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
- changed = True
-
- if changed and not module.check_mode:
- if not hard:
- client.vm.action('poweroff', vm.ID)
- else:
- client.vm.action('poweroff-hard', vm.ID)
-
- return changed
-
-
-def poweroff_vms(module, client, vms, hard):
- changed = False
-
- for vm in vms:
- changed = poweroff_vm(module, client, vm, hard) or changed
-
- return changed
-
-
-def reboot_vms(module, client, vms, wait_timeout, hard):
-
- if not module.check_mode:
- # Firstly, power-off all instances
- for vm in vms:
- vm = client.vm.info(vm.ID)
- lcm_state = vm.LCM_STATE
- state = vm.STATE
- if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
- poweroff_vm(module, client, vm, hard)
-
- # Wait for all to be power-off
- for vm in vms:
- wait_for_poweroff(module, client, vm, wait_timeout)
-
- for vm in vms:
- resume_vm(module, client, vm)
-
- return True
-
-
-def resume_vm(module, client, vm):
- vm = client.vm.info(vm.ID)
- changed = False
-
- state = vm.STATE
- if state in [VM_STATES.index('HOLD')]:
- changed = release_vm(module, client, vm)
- return changed
-
- lcm_state = vm.LCM_STATE
- if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
- module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
- "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
- if lcm_state not in [LCM_STATES.index('RUNNING')]:
- changed = True
-
- if changed and not module.check_mode:
- client.vm.action('resume', vm.ID)
-
- return changed
-
-
-def resume_vms(module, client, vms):
- changed = False
-
- for vm in vms:
- changed = resume_vm(module, client, vm) or changed
-
- return changed
-
-
-def release_vm(module, client, vm):
- vm = client.vm.info(vm.ID)
- changed = False
-
- state = vm.STATE
- if state != VM_STATES.index('HOLD'):
- module.fail_json(msg="Cannot perform action 'release' because this action is not available " +
- "because VM is not in state 'HOLD'.")
- else:
- changed = True
-
- if changed and not module.check_mode:
- client.vm.action('release', vm.ID)
-
- return changed
-
-
-def check_name_attribute(module, attributes):
- if attributes.get("NAME"):
- import re
- if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
- module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
- "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
-
-
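-# For example, the pattern above accepts '#' placeholders only at the end of
-# the name:
-#
-#   >>> bool(re.match(r'^[^#]+#*$', 'fooapp-###'))
-#   True
-#   >>> bool(re.match(r'^[^#]+#*$', '###'))       # only '#' signs
-#   False
-#   >>> bool(re.match(r'^[^#]+#*$', 'foo#bar'))   # '#' not at the end
-#   False
-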
-TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
- "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
- "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
-
-
-def check_attributes(module, attributes):
- for key in attributes.keys():
- if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
- module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
- # Check the format of the name attribute
- check_name_attribute(module, attributes)
-
-
-def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
- if not disk_saveas.get('name'):
- module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
-
- image_name = disk_saveas.get('name')
- disk_id = disk_saveas.get('disk_id', 0)
-
- if not module.check_mode:
- if vm.STATE != VM_STATES.index('POWEROFF'):
- module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
- try:
- client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
- except pyone.OneException as e:
- module.fail_json(msg=str(e))
- wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
-
-
-def get_connection_info(module):
-
- url = module.params.get('api_url')
- username = module.params.get('api_username')
- password = module.params.get('api_password')
-
- if not url:
- url = os.environ.get('ONE_URL')
-
- if not username:
- username = os.environ.get('ONE_USERNAME')
-
- if not password:
- password = os.environ.get('ONE_PASSWORD')
-
- if not username:
- if not password:
- authfile = os.environ.get('ONE_AUTH')
- if authfile is None:
- authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
- try:
- with open(authfile, "r") as fp:
- authstring = fp.read().rstrip()
- username = authstring.split(":")[0]
- password = authstring.split(":")[1]
- except (OSError, IOError):
- module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
- except Exception:
- module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
- if not url:
- module.fail_json(msg="Opennebula API url (api_url) is not specified")
- from collections import namedtuple
-
- auth_params = namedtuple('auth', ('url', 'username', 'password'))
-
- return auth_params(url=url, username=username, password=password)
-
-
-def main():
- fields = {
- "api_url": {"required": False, "type": "str"},
- "api_username": {"required": False, "type": "str"},
- "api_password": {"required": False, "type": "str", "no_log": True},
- "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
- "template_name": {"required": False, "type": "str"},
- "template_id": {"required": False, "type": "int"},
- "vm_start_on_hold": {"default": False, "type": "bool"},
- "state": {
- "default": "present",
- "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
- "type": "str"
- },
- "mode": {"required": False, "type": "str"},
- "owner_id": {"required": False, "type": "int"},
- "group_id": {"required": False, "type": "int"},
- "wait": {"default": True, "type": "bool"},
- "wait_timeout": {"default": 300, "type": "int"},
- "hard": {"default": False, "type": "bool"},
- "memory": {"required": False, "type": "str"},
- "cpu": {"required": False, "type": "float"},
- "vcpu": {"required": False, "type": "int"},
- "disk_size": {"required": False, "type": "list", "elements": "str"},
- "datastore_name": {"required": False, "type": "str"},
- "datastore_id": {"required": False, "type": "int"},
- "networks": {"default": [], "type": "list", "elements": "dict"},
- "count": {"default": 1, "type": "int"},
- "exact_count": {"required": False, "type": "int"},
- "attributes": {"default": {}, "type": "dict"},
- "count_attributes": {"required": False, "type": "dict"},
- "labels": {"default": [], "type": "list", "elements": "str"},
- "count_labels": {"required": False, "type": "list", "elements": "str"},
- "disk_saveas": {"type": "dict"},
- "persistent": {"default": False, "type": "bool"}
- }
-
- module = AnsibleModule(argument_spec=fields,
- mutually_exclusive=[
- ['template_id', 'template_name', 'instance_ids'],
- ['template_id', 'template_name', 'disk_saveas'],
- ['instance_ids', 'count_attributes', 'count'],
- ['instance_ids', 'count_labels', 'count'],
- ['instance_ids', 'exact_count'],
- ['instance_ids', 'attributes'],
- ['instance_ids', 'labels'],
- ['disk_saveas', 'attributes'],
- ['disk_saveas', 'labels'],
- ['exact_count', 'count'],
- ['count', 'hard'],
- ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
- ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
- ['instance_ids', 'networks'],
- ['persistent', 'disk_size']
- ],
- supports_check_mode=True)
-
- if not HAS_PYONE:
- module.fail_json(msg='This module requires pyone to work!')
-
- auth = get_connection_info(module)
- params = module.params
- instance_ids = params.get('instance_ids')
- requested_template_name = params.get('template_name')
- requested_template_id = params.get('template_id')
- put_vm_on_hold = params.get('vm_start_on_hold')
- state = params.get('state')
- permissions = params.get('mode')
- owner_id = params.get('owner_id')
- group_id = params.get('group_id')
- wait = params.get('wait')
- wait_timeout = params.get('wait_timeout')
- hard = params.get('hard')
- memory = params.get('memory')
- cpu = params.get('cpu')
- vcpu = params.get('vcpu')
- disk_size = params.get('disk_size')
- requested_datastore_id = params.get('datastore_id')
- requested_datastore_name = params.get('datastore_name')
- networks = params.get('networks')
- count = params.get('count')
- exact_count = params.get('exact_count')
- attributes = params.get('attributes')
- count_attributes = params.get('count_attributes')
- labels = params.get('labels')
- count_labels = params.get('count_labels')
- disk_saveas = params.get('disk_saveas')
- persistent = params.get('persistent')
-
- if not (auth.username and auth.password):
- module.warn("Credentials missing")
- else:
- one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
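- # Note: when credentials are missing we only warn above, so `one_client`
- # is left unbound and any later API call in this run would raise a
- # NameError.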
-
- if attributes:
- attributes = dict((key.upper(), value) for key, value in attributes.items())
- check_attributes(module, attributes)
-
- if count_attributes:
- count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
- if not attributes:
- import copy
- module.warn('When you pass `count_attributes` without the `attributes` option when deploying, `attributes` implicitly takes the same values.')
- attributes = copy.copy(count_attributes)
- check_attributes(module, count_attributes)
-
- if count_labels and not labels:
- module.warn('When you pass `count_labels` without the `labels` option when deploying, `labels` implicitly takes the same values.')
- labels = count_labels
-
- # Fetch template
- template_id = None
- if requested_template_id is not None or requested_template_name:
- template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
- if template_id is None:
- if requested_template_id is not None:
- module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
- elif requested_template_name:
- module.fail_json(msg="There is no template with name: " + requested_template_name)
-
- # Fetch datastore
- datastore_id = None
- if requested_datastore_id or requested_datastore_name:
- datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
- if datastore_id is None:
- if requested_datastore_id:
- module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
- elif requested_datastore_name:
- module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
- else:
- attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
-
- if exact_count and template_id is None:
- module.fail_json(msg='Option `exact_count` needs template_id or template_name')
-
- if exact_count is not None and not (count_attributes or count_labels):
- module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
- if (count_attributes or count_labels) and exact_count is None:
- module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
- if template_id is not None and state != 'present':
- module.fail_json(msg="Only state 'present' is valid for the template")
-
- if memory:
- attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
- if cpu:
- attributes['CPU'] = str(cpu)
- if vcpu:
- attributes['VCPU'] = str(vcpu)
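- # The three attributes above carry resize requests to OpenNebula:
- # MEMORY in megabytes (normalised by get_size_in_MB), CPU as a float
- # share, and VCPU as an integer count of virtual CPUs.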
-
- if exact_count is not None and state != 'present':
- module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
- if exact_count is not None and exact_count < 0:
- module.fail_json(msg='`exact_count` cannot be less than 0')
- if count <= 0:
- module.fail_json(msg='`count` has to be greater than 0')
-
- if permissions is not None:
- import re
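- # Exactly three octal digits are accepted: '600' and '640' pass,
- # while '0600', '60' or '8' are rejected by the regex below.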
- if re.match("^[0-7]{3}$", permissions) is None:
- module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
-
- if exact_count is not None:
- # Deploy an exact count of VMs
- changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
- count_attributes, labels, count_labels, disk_size,
- networks, hard, wait, wait_timeout, put_vm_on_hold, persistent)
- vms = tagged_instances_list
- elif template_id is not None and state == 'present':
- # Deploy count VMs
- changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
- attributes, labels, disk_size, networks, wait, wait_timeout,
- put_vm_on_hold, persistent)
- # instances_list - new instances
- # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
- vms = instances_list
- else:
- # Fetch data of instances, or change their state
- if not (instance_ids or attributes or labels):
- module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
-
- if memory or cpu or vcpu or disk_size or networks:
- module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
-
- if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
- module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
-
- vms = []
- tagged = False
- changed = False
-
- if instance_ids:
- vms = get_vms_by_ids(module, one_client, state, instance_ids)
- else:
- tagged = True
- vms = get_all_vms_by_attributes(one_client, attributes, labels)
-
- if len(vms) == 0 and state != 'absent' and state != 'present':
- module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
-
- if len(vms) == 0 and state == 'present' and not tagged:
- module.fail_json(msg='There are no instances with specified `instance_ids`.')
-
- if tagged and state == 'absent':
- module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
-
- if state == 'absent':
- changed = terminate_vms(module, one_client, vms, hard)
- elif state == 'rebooted':
- changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
- elif state == 'poweredoff':
- changed = poweroff_vms(module, one_client, vms, hard)
- elif state == 'running':
- changed = resume_vms(module, one_client, vms)
-
- instances_list = vms
- tagged_instances_list = []
-
- if permissions is not None:
- changed = set_vm_permissions(module, one_client, vms, permissions) or changed
-
- if owner_id is not None or group_id is not None:
- changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
-
- if wait and not module.check_mode and state != 'present':
- wait_for = {
- 'absent': wait_for_done,
- 'rebooted': wait_for_running,
- 'poweredoff': wait_for_poweroff,
- 'running': wait_for_running
- }
- for vm in vms:
- if vm is not None:
- wait_for[state](module, one_client, vm, wait_timeout)
-
- if disk_saveas is not None:
- if len(vms) == 0:
- module.fail_json(msg="There is no VM whose disk will be saved.")
- disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
- changed = True
-
- # instances - a list of instance info dicts whose state was changed or which were fetched with the C(instance_ids) option
- instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
- instances_ids = list(vm.ID for vm in instances_list if vm is not None)
- # tagged_instances - a list of instance info dicts matching the attributes and/or labels specified with C(count_attributes) and C(count_labels)
- tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
-
- result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py b/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
deleted file mode 100644
index a82914bd..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/oracle/oci_vcn.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: oci_vcn
-short_description: Manage Virtual Cloud Networks (VCN) in OCI
-description:
- - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
- The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
- U(https://github.com/oracle/oci-ansible-modules/releases).
-options:
- cidr_block:
- description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
- type: str
- required: false
- compartment_id:
- description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
- This option is mutually exclusive with I(vcn_id).
- type: str
- display_name:
- description: A user-friendly name. Does not have to be unique, and it's changeable.
- type: str
- aliases: [ 'name' ]
- dns_label:
- description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
- form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
- bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
- to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
- with a letter. The value cannot be changed.
- type: str
- state:
- description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
- type: str
- default: present
- choices: ['present', 'absent']
- vcn_id:
- description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
- with I(state=present). This option is mutually exclusive with I(compartment_id).
- type: str
- aliases: [ 'id' ]
-author: "Rohit Chaware (@rohitChaware)"
-extends_documentation_fragment:
-- community.general.oracle
-- community.general.oracle_creatable_resource
-- community.general.oracle_wait_options
-- community.general.oracle_tags
-
-'''
-
-EXAMPLES = """
-- name: Create a VCN
- community.general.oci_vcn:
- cidr_block: '10.0.0.0/16'
- compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
- display_name: my_vcn
- dns_label: ansiblevcn
-
-- name: Updates the specified VCN's display name
- community.general.oci_vcn:
- vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
- display_name: ansible_vcn
-
-- name: Delete the specified VCN
- community.general.oci_vcn:
- vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
- state: absent
-"""
-
-RETURN = """
-vcn:
- description: Information about the VCN
- returned: On successful create and update operation
- type: dict
- sample: {
- "cidr_block": "10.0.0.0/16",
- compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
- "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
- "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
- "display_name": "ansible_vcn",
- "dns_label": "ansiblevcn",
- "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
- "lifecycle_state": "AVAILABLE",
- "time_created": "2017-11-13T20:22:40.626000+00:00",
- "vcn_domain_name": "ansiblevcn.oraclevcn.com"
- }
-"""
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
-
-try:
- from oci.core.virtual_network_client import VirtualNetworkClient
- from oci.core.models import CreateVcnDetails
- from oci.core.models import UpdateVcnDetails
-
- HAS_OCI_PY_SDK = True
-except ImportError:
- HAS_OCI_PY_SDK = False
-
-
-def delete_vcn(virtual_network_client, module):
- result = oci_utils.delete_and_wait(
- resource_type="vcn",
- client=virtual_network_client,
- get_fn=virtual_network_client.get_vcn,
- kwargs_get={"vcn_id": module.params["vcn_id"]},
- delete_fn=virtual_network_client.delete_vcn,
- kwargs_delete={"vcn_id": module.params["vcn_id"]},
- module=module,
- )
- return result
-
-
-def update_vcn(virtual_network_client, module):
- result = oci_utils.check_and_update_resource(
- resource_type="vcn",
- client=virtual_network_client,
- get_fn=virtual_network_client.get_vcn,
- kwargs_get={"vcn_id": module.params["vcn_id"]},
- update_fn=virtual_network_client.update_vcn,
- primitive_params_update=["vcn_id"],
- kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
- module=module,
- update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
- )
- return result
-
-
-def create_vcn(virtual_network_client, module):
- create_vcn_details = CreateVcnDetails()
- for attribute in create_vcn_details.attribute_map.keys():
- if attribute in module.params:
- setattr(create_vcn_details, attribute, module.params[attribute])
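-
- # CreateVcnDetails.attribute_map lists the SDK model's attribute names;
- # any module parameter sharing a name (cidr_block, compartment_id,
- # display_name, dns_label, ...) is copied onto the create request above.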
-
- result = oci_utils.create_and_wait(
- resource_type="vcn",
- create_fn=virtual_network_client.create_vcn,
- kwargs_create={"create_vcn_details": create_vcn_details},
- client=virtual_network_client,
- get_fn=virtual_network_client.get_vcn,
- get_param="vcn_id",
- module=module,
- )
- return result
-
-
-def main():
- module_args = oci_utils.get_taggable_arg_spec(
- supports_create=True, supports_wait=True
- )
- module_args.update(
- dict(
- cidr_block=dict(type="str", required=False),
- compartment_id=dict(type="str", required=False),
- display_name=dict(type="str", required=False, aliases=["name"]),
- dns_label=dict(type="str", required=False),
- state=dict(
- type="str",
- required=False,
- default="present",
- choices=["absent", "present"],
- ),
- vcn_id=dict(type="str", required=False, aliases=["id"]),
- )
- )
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=False,
- mutually_exclusive=[["compartment_id", "vcn_id"]],
- )
-
- if not HAS_OCI_PY_SDK:
- module.fail_json(msg=missing_required_lib("oci"))
-
- virtual_network_client = oci_utils.create_service_client(
- module, VirtualNetworkClient
- )
-
- exclude_attributes = {"display_name": True, "dns_label": True}
- state = module.params["state"]
- vcn_id = module.params["vcn_id"]
-
- if state == "absent":
- if vcn_id is not None:
- result = delete_vcn(virtual_network_client, module)
- else:
- module.fail_json(
- msg="Specify vcn_id with state as 'absent' to delete a VCN."
- )
-
- else:
- if vcn_id is not None:
- result = update_vcn(virtual_network_client, module)
- else:
- result = oci_utils.check_and_create_resource(
- resource_type="vcn",
- create_fn=create_vcn,
- kwargs_create={
- "virtual_network_client": virtual_network_client,
- "module": module,
- },
- list_fn=virtual_network_client.list_vcns,
- kwargs_list={"compartment_id": module.params["compartment_id"]},
- module=module,
- model=CreateVcnDetails(),
- exclude_attributes=exclude_attributes,
- )
-
- module.exit_json(**result)
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
deleted file mode 100644
index 26179eb8..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_failover.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: ovh_ip_failover
-short_description: Manage OVH IP failover address
-description:
- - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
- an IP failover address (or failover block) between services.
-author: "Pascal HERAUD (@pascalheraud)"
-notes:
- - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consumer
- key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
-requirements:
- - ovh >= 0.4.8
-options:
- name:
- required: true
- description:
- - The IP address to manage (can be a single IP like 1.1.1.1
- or a block like 1.1.1.1/28)
- type: str
- service:
- required: true
- description:
- - The name of the OVH service this IP address should be routed to
- type: str
- endpoint:
- required: true
- description:
- - The endpoint to use (for instance ovh-eu)
- type: str
- wait_completion:
- required: false
- default: true
- type: bool
- description:
- - If true, the module will wait for the IP address to be moved.
- If false, it exits without waiting. The taskId will be returned
- in the module output.
- wait_task_completion:
- required: false
- default: 0
- description:
- - If not 0, the module will wait for this task id to be
- completed. Use wait_task_completion if you want to wait for
- completion of a previously executed task with
- wait_completion=false. You can execute this module repeatedly on
- a list of failover IPs using wait_completion=false (see examples)
- type: int
- application_key:
- required: true
- description:
- - The applicationKey to use
- type: str
- application_secret:
- required: true
- description:
- - The application secret to use
- type: str
- consumer_key:
- required: true
- description:
- - The consumer key to use
- type: str
- timeout:
- required: false
- default: 120
- description:
- - The timeout in seconds used to wait for a task to be
- completed.
- type: int
-
-'''
-
-EXAMPLES = '''
-# Route an IP address 1.1.1.1 to the service ns666.ovh.net
-- community.general.ovh_ip_failover:
- name: 1.1.1.1
- service: ns666.ovh.net
- endpoint: ovh-eu
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-- community.general.ovh_ip_failover:
- name: 1.1.1.1
- service: ns666.ovh.net
- endpoint: ovh-eu
- wait_completion: false
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
- register: moved
-- community.general.ovh_ip_failover:
- name: 1.1.1.1
- service: ns666.ovh.net
- endpoint: ovh-eu
- wait_task_completion: "{{moved.taskId}}"
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-'''
-
-RETURN = '''
-'''
-
-import time
-
-try:
- import ovh
- import ovh.exceptions
- from ovh.exceptions import APIError
- HAS_OVH = True
-except ImportError:
- HAS_OVH = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import quote_plus
-
-
-def getOvhClient(ansibleModule):
- endpoint = ansibleModule.params.get('endpoint')
- application_key = ansibleModule.params.get('application_key')
- application_secret = ansibleModule.params.get('application_secret')
- consumer_key = ansibleModule.params.get('consumer_key')
-
- return ovh.Client(
- endpoint=endpoint,
- application_key=application_key,
- application_secret=application_secret,
- consumer_key=consumer_key
- )
-
-
-def waitForNoTask(client, name, timeout):
- currentTimeout = timeout
- while client.get('/ip/{0}/task'.format(quote_plus(name)),
- function='genericMoveFloatingIp',
- status='todo'):
- time.sleep(1) # Delay for 1 sec
- currentTimeout -= 1
- if currentTimeout < 0:
- return False
- return True
-
-
-def waitForTaskDone(client, name, taskId, timeout):
- currentTimeout = timeout
- while True:
- task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
- if task['status'] == 'done':
- return True
- time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
- currentTimeout -= 5
- if currentTimeout < 0:
- return False
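-
-# Both wait helpers follow the same poll-with-deadline shape: waitForNoTask
-# polls every second, waitForTaskDone every 5 seconds to avoid hammering the
-# API. A minimal sketch of the shared pattern (hypothetical helper, not part
-# of this module):
-#
-# def poll(check, timeout, interval):
-#     deadline = time.time() + timeout
-#     while time.time() < deadline:
-#         if check():
-#             return True
-#         time.sleep(interval)
-#     return False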
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- service=dict(required=True),
- endpoint=dict(required=True),
- wait_completion=dict(default=True, type='bool'),
- wait_task_completion=dict(default=0, type='int'),
- application_key=dict(required=True, no_log=True),
- application_secret=dict(required=True, no_log=True),
- consumer_key=dict(required=True, no_log=True),
- timeout=dict(default=120, type='int')
- ),
- supports_check_mode=True
- )
-
- result = dict(
- changed=False
- )
-
- if not HAS_OVH:
- module.fail_json(msg='ovh-api python module is required to run this module')
-
- # Get parameters
- name = module.params.get('name')
- service = module.params.get('service')
- timeout = module.params.get('timeout')
- wait_completion = module.params.get('wait_completion')
- wait_task_completion = module.params.get('wait_task_completion')
-
- # Connect to OVH API
- client = getOvhClient(module)
-
- # Check that the failover IP address exists
- try:
- ips = client.get('/ip', ip=name, type='failover')
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the list of ips, '
- 'check application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'.format(apiError))
-
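- # The /ip listing returns failover addresses in CIDR form, so a single
- # address such as 1.1.1.1 may appear as 1.1.1.1/32; accept both forms.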
- if name not in ips and '{0}/32'.format(name) not in ips:
- module.fail_json(msg='IP {0} does not exist'.format(name))
-
- # Check that no task is pending before going on
- try:
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for no pending '
- 'tasks before executing the module '.format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the list of pending tasks '
- 'of the ip, check application key, secret, consumerkey '
- 'and parameters. Error returned by OVH api was : {0}'
- .format(apiError))
-
- try:
- ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the properties '
- 'of the ip, check application key, secret, consumerkey '
- 'and parameters. Error returned by OVH api was : {0}'
- .format(apiError))
-
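- # Idempotence check: only move the IP when it is not already routed to
- # the requested service; otherwise fall through and report changed=False.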
- if ipproperties['routedTo']['serviceName'] != service:
- if not module.check_mode:
- if wait_task_completion == 0:
- # Move the IP and get the created taskId
- task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
- taskId = task['taskId']
- result['moved'] = True
- else:
- # Just wait for the given taskId to be completed
- taskId = wait_task_completion
- result['moved'] = False
- result['taskId'] = taskId
- if wait_completion or wait_task_completion != 0:
- if not waitForTaskDone(client, name, taskId, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion '
- 'of move ip to service'.format(timeout))
- result['waited'] = True
- else:
- result['waited'] = False
- result['changed'] = True
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
deleted file mode 100644
index 28d6f3a1..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: ovh_ip_loadbalancing_backend
-short_description: Manage OVH IP LoadBalancing backends
-description:
- - Manage OVH (French European hosting provider) LoadBalancing IP backends
-author: Pascal Heraud (@pascalheraud)
-notes:
- - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
- You have to create an application (a key and secret) with a consumer
- key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
-requirements:
- - ovh > 0.3.5
-options:
- name:
- required: true
- description:
- - The internal name of the load balancing IP (ip-X.X.X.X)
- type: str
- backend:
- required: true
- description:
- - The IP address of the backend to update / modify / delete
- type: str
- state:
- default: present
- choices: ['present', 'absent']
- description:
- - Determines whether the backend is to be created/modified
- or deleted
- type: str
- probe:
- default: 'none'
- choices: ['none', 'http', 'icmp' , 'oco']
- description:
- - Determines the type of probe to use for this backend
- type: str
- weight:
- default: 8
- description:
- - Determines the weight for this backend
- type: int
- endpoint:
- required: true
- description:
- - The endpoint to use (for instance ovh-eu)
- type: str
- application_key:
- required: true
- description:
- - The applicationKey to use
- type: str
- application_secret:
- required: true
- description:
- - The application secret to use
- type: str
- consumer_key:
- required: true
- description:
- - The consumer key to use
- type: str
- timeout:
- default: 120
- description:
- - The timeout in seconds used to wait for a task to be
- completed.
- type: int
-
-'''
-
-EXAMPLES = '''
-- name: Adds or modify the backend '212.1.1.1' to a loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
- name: ip-1.1.1.1
- backend: 212.1.1.1
- state: present
- probe: none
- weight: 8
- endpoint: ovh-eu
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-
-- name: Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
- ovh_ip_loadbalancing:
- name: ip-1.1.1.1
- backend: 212.1.1.1
- state: absent
- endpoint: ovh-eu
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-'''
-
-RETURN = '''
-'''
-
-import time
-
-try:
- import ovh
- import ovh.exceptions
- from ovh.exceptions import APIError
- HAS_OVH = True
-except ImportError:
- HAS_OVH = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def getOvhClient(ansibleModule):
- endpoint = ansibleModule.params.get('endpoint')
- application_key = ansibleModule.params.get('application_key')
- application_secret = ansibleModule.params.get('application_secret')
- consumer_key = ansibleModule.params.get('consumer_key')
-
- return ovh.Client(
- endpoint=endpoint,
- application_key=application_key,
- application_secret=application_secret,
- consumer_key=consumer_key
- )
-
-
-def waitForNoTask(client, name, timeout):
- currentTimeout = timeout
- while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
- time.sleep(1) # Delay for 1 sec
- currentTimeout -= 1
- if currentTimeout < 0:
- return False
- return True
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- backend=dict(required=True),
- weight=dict(default=8, type='int'),
- probe=dict(default='none',
- choices=['none', 'http', 'icmp', 'oco']),
- state=dict(default='present', choices=['present', 'absent']),
- endpoint=dict(required=True),
- application_key=dict(required=True, no_log=True),
- application_secret=dict(required=True, no_log=True),
- consumer_key=dict(required=True, no_log=True),
- timeout=dict(default=120, type='int')
- )
- )
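-
- # Note: unlike ovh_ip_failover, this module does not declare
- # supports_check_mode, so it always applies its changes for real.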
-
- if not HAS_OVH:
- module.fail_json(msg='ovh-api python module '
- 'is required to run this module')
-
- # Get parameters
- name = module.params.get('name')
- state = module.params.get('state')
- backend = module.params.get('backend')
- weight = module.params.get('weight')
- probe = module.params.get('probe')
- timeout = module.params.get('timeout')
-
- # Connect to OVH API
- client = getOvhClient(module)
-
- # Check that the load balancing exists
- try:
- loadBalancings = client.get('/ip/loadBalancing')
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the list of loadBalancing, '
- 'check application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'.format(apiError))
-
- if name not in loadBalancings:
- module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
-
- # Check that no task is pending before going on
- try:
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for no pending '
- 'tasks before executing the module '.format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the list of pending tasks '
- 'of the loadBalancing, check application key, secret, consumerkey '
- 'and parameters. Error returned by OVH api was : {0}'
- .format(apiError))
-
- try:
- backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the list of backends '
- 'of the loadBalancing, check application key, secret, consumerkey '
- 'and parameters. Error returned by OVH api was : {0}'
- .format(apiError))
-
- backendExists = backend in backends
- moduleChanged = False
- if state == "absent":
- if backendExists:
- # Remove backend
- try:
- client.delete(
- '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion '
- 'of removing backend task'.format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for deleting the backend, '
- 'check application key, secret, consumerkey and '
- 'parameters. Error returned by OVH api was : {0}'
- .format(apiError))
- moduleChanged = True
- else:
- if backendExists:
- # Get properties
- try:
- backendProperties = client.get(
- '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for getting the backend properties, '
- 'check application key, secret, consumerkey and '
- 'parameters. Error returned by OVH api was : {0}'
- .format(apiError))
-
- if (backendProperties['weight'] != weight):
- # Change weight
- try:
- client.post(
- '/ip/loadBalancing/{0}/backend/{1}/setWeight'
- .format(name, backend), weight=weight)
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion '
- 'of setWeight to backend task'
- .format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for updating the weight of the '
- 'backend, check application key, secret, consumerkey '
- 'and parameters. Error returned by OVH api was : {0}'
- .format(apiError))
- moduleChanged = True
-
- if (backendProperties['probe'] != probe):
- # Change probe
- backendProperties['probe'] = probe
- try:
- client.put(
- '/ip/loadBalancing/{0}/backend/{1}'
- .format(name, backend), probe=probe)
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion of '
- 'setProbe to backend task'
- .format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for updating the probe of '
- 'the backend, check application key, secret, '
- 'consumerkey and parameters. Error returned by OVH api '
- 'was : {0}'
- .format(apiError))
- moduleChanged = True
-
- else:
- # Creates backend
- try:
- try:
- client.post('/ip/loadBalancing/{0}/backend'.format(name),
- ipBackend=backend, probe=probe, weight=weight)
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for creating the backend, check '
- 'application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'
- .format(apiError))
-
- if not waitForNoTask(client, name, timeout):
- module.fail_json(
- msg='Timeout of {0} seconds while waiting for completion of '
- 'backend creation task'.format(timeout))
- except APIError as apiError:
- module.fail_json(
- msg='Unable to call OVH api for creating the backend, check '
- 'application key, secret, consumerkey and parameters. '
- 'Error returned by OVH api was : {0}'.format(apiError))
- moduleChanged = True
-
- module.exit_json(changed=moduleChanged)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
deleted file mode 100644
index 75c70a79..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/ovh/ovh_monthly_billing.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Francois Lallart (@fraff)
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: ovh_monthly_billing
-author: Francois Lallart (@fraff)
-version_added: '0.2.0'
-short_description: Manage OVH monthly billing
-description:
- - Enable monthly billing on OVH cloud instances (be aware OVH does not allow disabling it).
-requirements: [ "ovh" ]
-options:
- project_id:
- required: true
- type: str
- description:
- - ID of the project, get it with U(https://api.ovh.com/console/#/cloud/project#GET)
- instance_id:
- required: true
- type: str
- description:
- - ID of the instance, get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET)
- endpoint:
- type: str
- description:
- - The endpoint to use (for instance ovh-eu)
- application_key:
- type: str
- description:
- - The applicationKey to use
- application_secret:
- type: str
- description:
- - The application secret to use
- consumer_key:
- type: str
- description:
- - The consumer key to use
-'''
-
-EXAMPLES = '''
-- name: Basic usage, using auth from /etc/ovh.conf
- community.general.ovh_monthly_billing:
- project_id: 0c727a20aa144485b70c44dee9123b46
- instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
-
-# Get the openstack cloud ID and instance ID; OVH uses them in its API
-- name: Get openstack cloud ID and instance ID
- os_server_info:
- cloud: myProjectName
- region_name: myRegionName
- server: myServerName
- register: openstack_servers
-
-- name: Use IDs
- community.general.ovh_monthly_billing:
- project_id: "{{ openstack_servers.0.tenant_id }}"
- instance_id: "{{ openstack_servers.0.id }}"
- application_key: yourkey
- application_secret: yoursecret
- consumer_key: yourconsumerkey
-'''
-
-RETURN = '''
-'''
-
-import os
-import sys
-import traceback
-
-try:
- import ovh
- import ovh.exceptions
- from ovh.exceptions import APIError
- HAS_OVH = True
-except ImportError:
- HAS_OVH = False
- OVH_IMPORT_ERROR = traceback.format_exc()
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- project_id=dict(required=True),
- instance_id=dict(required=True),
- endpoint=dict(required=False),
- application_key=dict(required=False, no_log=True),
- application_secret=dict(required=False, no_log=True),
- consumer_key=dict(required=False, no_log=True),
- ),
- supports_check_mode=True
- )
-
- # Get parameters
- project_id = module.params.get('project_id')
- instance_id = module.params.get('instance_id')
- endpoint = module.params.get('endpoint')
- application_key = module.params.get('application_key')
- application_secret = module.params.get('application_secret')
- consumer_key = module.params.get('consumer_key')
- project = ""
- instance = ""
- ovh_billing_status = ""
-
- if not HAS_OVH:
- module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
-
- # Connect to OVH API
- client = ovh.Client(
- endpoint=endpoint,
- application_key=application_key,
- application_secret=application_secret,
- consumer_key=consumer_key
- )
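-
- # python-ovh can also read credentials from its configuration files
- # (e.g. /etc/ovh.conf) when these parameters are left unset, which is
- # what the first EXAMPLES entry above relies on.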
-
- # Check that the project exists
- try:
- project = client.get('/cloud/project/{0}'.format(project_id))
- except ovh.exceptions.ResourceNotFoundError:
- module.fail_json(msg='project {0} does not exist'.format(project_id))
-
- # Check that the instance exists
- try:
- instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
- except ovh.exceptions.ResourceNotFoundError:
- module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
-
- # Is monthlyBilling already enabled or pending?
- if instance['monthlyBilling'] is not None:
- if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
- module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
-
- if module.check_mode:
- module.exit_json(changed=True, msg="Dry Run!")
-
- try:
- ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
- module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
- except APIError as apiError:
- module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
-
- # We should never reach here
- module.fail_json(msg='Internal ovh_monthly_billing module error')
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
deleted file mode 100644
index 5912a6f4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_device.py
+++ /dev/null
@@ -1,670 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# (c) 2016, Tomas Karasek
-# (c) 2016, Matt Baldwin
-# (c) 2016, Thibaud Morel l'Horset
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: packet_device
-
-short_description: Manage a bare metal server in the Packet Host.
-
-description:
- - Manage a bare metal server in the Packet Host (a "device" in the API terms).
- - When the machine is created it can optionally wait for public IP address, or for active state.
- - This module has a dependency on packet >= 1.0.
- - API is documented at U(https://www.packet.net/developers/api/devices).
-
-
-author:
- - Tomas Karasek (@t0mk)
- - Matt Baldwin (@baldwinSPC)
- - Thibaud Morel l'Horset (@teebes)
-
-options:
- auth_token:
- description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
-
- count:
- description:
- - The number of devices to create. The count can be included in the hostname via the %d string formatter.
- default: 1
- type: int
-
- count_offset:
- description:
- - From which number to start the count.
- default: 1
- type: int
-
- device_ids:
- description:
- - List of device IDs on which to operate.
- type: list
- elements: str
-
- tags:
- description:
- - List of device tags.
- - Currently implemented only for device creation.
- type: list
- elements: str
- version_added: '0.2.0'
-
- facility:
- description:
- - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
- type: str
-
- features:
- description:
- - Dict with "features" for device creation. See Packet API docs for details.
- type: dict
-
- hostnames:
- description:
- - A hostname of a device, or a list of hostnames.
- - If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
- - If only one hostname is given, it may be expanded to a list if I(count)>1.
- aliases: [name]
- type: list
- elements: str
-
- locked:
- description:
- - Whether to lock a created device.
- default: false
- aliases: [lock]
- type: bool
-
- operating_system:
- description:
- - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
- type: str
-
- plan:
- description:
- - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
- type: str
-
- project_id:
- description:
- - ID of project of the device.
- required: true
- type: str
-
- state:
- description:
- - Desired state of the device.
- - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
- - If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout).
- choices: [present, absent, active, inactive, rebooted]
- default: present
- type: str
-
- user_data:
- description:
- - Userdata blob made available to the machine.
- type: str
-
- wait_for_public_IPv:
- description:
- - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
- - If set to 4, it will wait until a public IPv4 address is assigned to the instance.
- - If set to 6, it will wait until a public IPv6 address is assigned to the instance.
- choices: [4,6]
- type: int
-
- wait_timeout:
- description:
- - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
- - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice.
- default: 900
- type: int
-
- ipxe_script_url:
- description:
- - URL of custom iPXE script for provisioning.
- - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
- type: str
-
- always_pxe:
- description:
- - Persist PXE as the first boot option.
- - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
- default: false
- type: bool
-
-
-requirements:
- - "packet-python >= 1.35"
-
-notes:
- - Doesn't support check mode.
-
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass it to the auth_token parameter of the module instead.
-
-# Creating devices
-
-- name: Create 1 device
- hosts: localhost
- tasks:
- - community.general.packet_device:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- hostnames: myserver
- tags: ci-xyz
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
-
-# Create the same device and wait until it is in state "active", (when it's
-# ready for other API operations). Fail if the device is not "active" in
-# 10 minutes.
-
-- name: Create device and wait up to 10 minutes for active state
- hosts: localhost
- tasks:
- - community.general.packet_device:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- hostnames: myserver
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
- state: active
- wait_timeout: 600
-
-- name: Create 3 ubuntu devices called server-01, server-02 and server-03
- hosts: localhost
- tasks:
- - community.general.packet_device:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- hostnames: server-%02d
- count: 3
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
-
-- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
- hosts: localhost
- tasks:
- - name: Create 3 devices and register their facts
- community.general.packet_device:
- hostnames: [coreos-one, coreos-two, coreos-three]
- operating_system: coreos_stable
- plan: baremetal_0
- facility: ewr1
- locked: true
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- wait_for_public_IPv: 4
- user_data: |
- #cloud-config
- ssh_authorized_keys:
- - {{ lookup('file', 'my_packet_sshkey') }}
- coreos:
- etcd:
- discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
- addr: $private_ipv4:4001
- peer-addr: $private_ipv4:7001
- fleet:
- public-ip: $private_ipv4
- units:
- - name: etcd.service
- command: start
- - name: fleet.service
- command: start
- register: newhosts
-
- - name: Wait for ssh
- ansible.builtin.wait_for:
- delay: 1
- host: "{{ item.public_ipv4 }}"
- port: 22
- state: started
- timeout: 500
- with_items: "{{ newhosts.devices }}"
-
-
-# Other states of devices
-
-- name: Remove 3 devices by uuid
- hosts: localhost
- tasks:
- - community.general.packet_device:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- state: absent
- device_ids:
- - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
- - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
- - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
-'''
-
-RETURN = '''
-changed:
- description: True if a device was altered in any way (created, modified or removed)
- type: bool
- sample: True
- returned: success
-
-devices:
- description: Information about each device that was processed
- type: list
- sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
- "public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
- "tags": [], "locked": false, "state": "provisioning",
- "public_ipv6": ""2604:1380:2:5200::3"}]'
- returned: success
-''' # NOQA
-
-
-import os
-import re
-import time
-import uuid
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PACKET_SDK = True
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-
-NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
-HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
-MAX_DEVICES = 100
-
-PACKET_DEVICE_STATES = (
- 'queued',
- 'provisioning',
- 'failed',
- 'powering_on',
- 'active',
- 'powering_off',
- 'inactive',
- 'rebooting',
-)
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-
-ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
-
-
-def serialize_device(device):
- """
- Standard representation for a device as returned by various tasks::
-
- {
- 'id': 'device_id'
- 'hostname': 'device_hostname',
- 'tags': [],
- 'locked': false,
- 'state': 'provisioning',
- 'ip_addresses': [
- {
- "address": "147.75.194.227",
- "address_family": 4,
- "public": true
- },
- {
- "address": "2604:1380:2:5200::3",
- "address_family": 6,
- "public": true
- },
- {
- "address": "10.100.11.129",
- "address_family": 4,
- "public": false
- }
- ],
- "private_ipv4": "10.100.11.129",
- "public_ipv4": "147.75.194.227",
- "public_ipv6": "2604:1380:2:5200::3",
- }
-
- """
- device_data = {}
- device_data['id'] = device.id
- device_data['hostname'] = device.hostname
- device_data['tags'] = device.tags
- device_data['locked'] = device.locked
- device_data['state'] = device.state
- device_data['ip_addresses'] = [
- {
- 'address': addr_data['address'],
- 'address_family': addr_data['address_family'],
- 'public': addr_data['public'],
- }
- for addr_data in device.ip_addresses
- ]
- # Also include each IPs as a key for easier lookup in roles.
- # Key names:
- # - public_ipv4
- # - public_ipv6
- # - private_ipv4
- # - private_ipv6 (if there is one)
- for ipdata in device_data['ip_addresses']:
- if ipdata['public']:
- if ipdata['address_family'] == 6:
- device_data['public_ipv6'] = ipdata['address']
- elif ipdata['address_family'] == 4:
- device_data['public_ipv4'] = ipdata['address']
- elif not ipdata['public']:
- if ipdata['address_family'] == 6:
- # Packet doesn't give private ipv6 yet, but maybe one
- # day they will
- device_data['private_ipv6'] = ipdata['address']
- elif ipdata['address_family'] == 4:
- device_data['private_ipv4'] = ipdata['address']
- return device_data
-
-
-def is_valid_hostname(hostname):
- return re.match(HOSTNAME_RE, hostname) is not None
-
-
-def is_valid_uuid(myuuid):
- try:
- val = uuid.UUID(myuuid, version=4)
- except ValueError:
- return False
- return str(val) == myuuid
-
-
-def listify_string_name_or_id(s):
- if ',' in s:
- return s.split(',')
- else:
- return [s]
-
-
-def get_hostname_list(module):
- # hostname is a list-typed param, so I guess it should return list
- # (and it does, in Ansible 2.2.1) but in order to be defensive,
- # I keep here the code to convert an eventual string to list
- hostnames = module.params.get('hostnames')
- count = module.params.get('count')
- count_offset = module.params.get('count_offset')
- if isinstance(hostnames, str):
- hostnames = listify_string_name_or_id(hostnames)
- if not isinstance(hostnames, list):
- raise Exception("name %s is not convertible to list" % hostnames)
-
- # at this point, hostnames is a list
- hostnames = [h.strip() for h in hostnames]
-
- if (len(hostnames) > 1) and (count > 1):
- _msg = ("If you set count>1, you should only specify one hostname "
- "with the %d formatter, not a list of hostnames.")
- raise Exception(_msg)
-
- if (len(hostnames) == 1) and (count > 0):
- hostname_spec = hostnames[0]
- count_range = range(count_offset, count_offset + count)
- if re.search(r"%\d{0,2}d", hostname_spec):
- hostnames = [hostname_spec % i for i in count_range]
- elif count > 1:
- hostname_spec = '%s%%02d' % hostname_spec
- hostnames = [hostname_spec % i for i in count_range]
-
- for hn in hostnames:
- if not is_valid_hostname(hn):
- raise Exception("Hostname '%s' does not seem to be valid" % hn)
-
- if len(hostnames) > MAX_DEVICES:
- raise Exception("You specified too many hostnames, max is %d" %
- MAX_DEVICES)
- return hostnames
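-
-
-# Illustration of the expansion rules above (hypothetical values):
-# hostnames=['web-%02d'], count=3, count_offset=1 -> web-01, web-02, web-03
-# hostnames=['db'], count=2, count_offset=1 -> db01, db02
-# (a '%02d' suffix is appended automatically when count > 1 and the single
-# hostname contains no % formatter)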
-
-
-def get_device_id_list(module):
- device_ids = module.params.get('device_ids')
-
- if isinstance(device_ids, str):
- device_ids = listify_string_name_or_id(device_ids)
-
- device_ids = [di.strip() for di in device_ids]
-
- for di in device_ids:
- if not is_valid_uuid(di):
- raise Exception("Device ID '%s' does not seem to be valid" % di)
-
- if len(device_ids) > MAX_DEVICES:
- raise Exception("You specified too many devices, max is %d" %
- MAX_DEVICES)
- return device_ids
-
-
-def create_single_device(module, packet_conn, hostname):
-
- for param in ('hostnames', 'operating_system', 'plan'):
- if not module.params.get(param):
- raise Exception("%s parameter is required for new device."
- % param)
- project_id = module.params.get('project_id')
- plan = module.params.get('plan')
- tags = module.params.get('tags')
- user_data = module.params.get('user_data')
- facility = module.params.get('facility')
- operating_system = module.params.get('operating_system')
- locked = module.params.get('locked')
- ipxe_script_url = module.params.get('ipxe_script_url')
- always_pxe = module.params.get('always_pxe')
- if operating_system != 'custom_ipxe':
- for param in ('ipxe_script_url', 'always_pxe'):
- if module.params.get(param):
- raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
-
- device = packet_conn.create_device(
- project_id=project_id,
- hostname=hostname,
- tags=tags,
- plan=plan,
- facility=facility,
- operating_system=operating_system,
- userdata=user_data,
- locked=locked,
- ipxe_script_url=ipxe_script_url,
- always_pxe=always_pxe)
- return device
-
-
-def refresh_device_list(module, packet_conn, devices):
- device_ids = [d.id for d in devices]
- new_device_list = get_existing_devices(module, packet_conn)
- return [d for d in new_device_list if d.id in device_ids]
-
-
-def wait_for_devices_active(module, packet_conn, watched_devices):
- wait_timeout = module.params.get('wait_timeout')
- wait_timeout = time.time() + wait_timeout
- refreshed = watched_devices
- while wait_timeout > time.time():
- refreshed = refresh_device_list(module, packet_conn, watched_devices)
- if all(d.state == 'active' for d in refreshed):
- return refreshed
- time.sleep(5)
- raise Exception("Waiting for state \"active\" timed out for devices: %s"
- % [d.hostname for d in refreshed if d.state != "active"])
-
-
-def wait_for_public_IPv(module, packet_conn, created_devices):
-
- def has_public_ip(addr_list, ip_v):
- return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list)
-
- def all_have_public_ip(ds, ip_v):
- return all(has_public_ip(d.ip_addresses, ip_v) for d in ds)
-
- address_family = module.params.get('wait_for_public_IPv')
-
- wait_timeout = module.params.get('wait_timeout')
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- refreshed = refresh_device_list(module, packet_conn, created_devices)
- if all_have_public_ip(refreshed, address_family):
- return refreshed
- time.sleep(5)
-
- raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
- % (address_family, [d.hostname for d in created_devices]))
-
-
-def get_existing_devices(module, packet_conn):
- project_id = module.params.get('project_id')
- return packet_conn.list_devices(
- project_id, params={
- 'per_page': MAX_DEVICES})
-
-
-def get_specified_device_identifiers(module):
- if module.params.get('device_ids'):
- device_id_list = get_device_id_list(module)
- return {'ids': device_id_list, 'hostnames': []}
- elif module.params.get('hostnames'):
- hostname_list = get_hostname_list(module)
- return {'hostnames': hostname_list, 'ids': []}
-
-
-def act_on_devices(module, packet_conn, target_state):
- specified_identifiers = get_specified_device_identifiers(module)
- existing_devices = get_existing_devices(module, packet_conn)
- changed = False
- create_hostnames = []
- if target_state in ['present', 'active', 'rebooted']:
- # states where we might create non-existing specified devices
- existing_devices_names = [ed.hostname for ed in existing_devices]
- create_hostnames = [hn for hn in specified_identifiers['hostnames']
- if hn not in existing_devices_names]
-
- process_devices = [d for d in existing_devices
- if (d.id in specified_identifiers['ids']) or
- (d.hostname in specified_identifiers['hostnames'])]
-
- if target_state != 'present':
- _absent_state_map = {}
- for s in PACKET_DEVICE_STATES:
- _absent_state_map[s] = packet.Device.delete
-
- state_map = {
- 'absent': _absent_state_map,
- 'active': {'inactive': packet.Device.power_on,
- 'provisioning': None, 'rebooting': None
- },
- 'inactive': {'active': packet.Device.power_off},
- 'rebooted': {'active': packet.Device.reboot,
- 'inactive': packet.Device.power_on,
- 'provisioning': None, 'rebooting': None
- },
- }
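-
- # state_map[target][current] names the API call that moves a device from
- # `current` towards `target`; a value of None means the device is already
- # in transit (provisioning/rebooting) and is left alone.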
-
- # First do non-creation actions, it might be faster
- for d in process_devices:
- if d.state == target_state:
- continue
- if d.state in state_map[target_state]:
- api_operation = state_map[target_state].get(d.state)
- if api_operation is not None:
- api_operation(d)
- changed = True
- else:
- _msg = (
- "I don't know how to process existing device %s from state %s "
- "to state %s" %
- (d.hostname, d.state, target_state))
- raise Exception(_msg)
-
- # At last create missing devices
- created_devices = []
- if create_hostnames:
- created_devices = [create_single_device(module, packet_conn, n)
- for n in create_hostnames]
- if module.params.get('wait_for_public_IPv'):
- created_devices = wait_for_public_IPv(
- module, packet_conn, created_devices)
- changed = True
-
- processed_devices = created_devices + process_devices
- if target_state == 'active':
- processed_devices = wait_for_devices_active(
- module, packet_conn, processed_devices)
-
- return {
- 'changed': changed,
- 'devices': [serialize_device(d) for d in processed_devices]
- }
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
- no_log=True),
- count=dict(type='int', default=1),
- count_offset=dict(type='int', default=1),
- device_ids=dict(type='list', elements='str'),
- facility=dict(),
- features=dict(type='dict'),
- hostnames=dict(type='list', elements='str', aliases=['name']),
- tags=dict(type='list', elements='str'),
- locked=dict(type='bool', default=False, aliases=['lock']),
- operating_system=dict(),
- plan=dict(),
- project_id=dict(required=True),
- state=dict(choices=ALLOWED_STATES, default='present'),
- user_data=dict(default=None),
- wait_for_public_IPv=dict(type='int', choices=[4, 6]),
- wait_timeout=dict(type='int', default=900),
- ipxe_script_url=dict(default=''),
- always_pxe=dict(type='bool', default=False),
- ),
- required_one_of=[('device_ids', 'hostnames',)],
- mutually_exclusive=[
- ('hostnames', 'device_ids'),
- ('count', 'device_ids'),
- ('count_offset', 'device_ids'),
- ]
- )
-
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable %s, "
- "the auth_token parameter is required" %
- PACKET_API_TOKEN_ENV_VAR)
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- try:
- module.exit_json(**act_on_devices(module, packet_conn, state))
- except Exception as e:
- module.fail_json(msg='failed to set device state %s, error: %s' %
- (state, to_native(e)), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
deleted file mode 100644
index 718de36f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_ip_subnet.py
+++ /dev/null
@@ -1,326 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Nurfet Becirevic
-# Copyright: (c) 2017, Tomas Karasek
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: packet_ip_subnet
-
-short_description: Assign IP subnet to a bare metal server.
-
-description:
- - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
- - IPv4 subnets must come from already reserved block.
- - IPv6 subnets must come from publicly routable /56 block from your project.
- - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
-
-version_added: '0.2.0'
-
-author:
- - Tomas Karasek (@t0mk)
- - Nurfet Becirevic (@nurfet-becirevic)
-
-options:
- auth_token:
- description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
-
- hostname:
- description:
- - A hostname of a device to/from which to assign/remove a subnet.
- required: False
- type: str
-
- device_id:
- description:
- - UUID of a device to/from which to assign/remove a subnet.
- required: False
- type: str
-
- project_id:
- description:
- - UUID of a project of the device to/from which to assign/remove a subnet.
- type: str
-
- device_count:
- description:
- - The number of devices to retrieve from the project. The max allowed value is 1000.
- - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
- default: 100
- type: int
-
- cidr:
- description:
- - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
- aliases: [name]
- type: str
- required: true
-
- state:
- description:
- - Desired state of the IP subnet on the specified device.
- - With state == C(present), you must specify either hostname or device_id. The subnet with the given CIDR will then be assigned to the specified device.
- - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from the specified device.
- - If you leave both hostname and device_id empty, the subnet will be removed from any device it's assigned to.
- choices: ['present', 'absent']
- default: 'present'
- type: str
-
-requirements:
- - "packet-python >= 1.35"
- - "python >= 2.6"
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass it to the auth_token parameter of the module instead.
-
-- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
- hosts: localhost
- tasks:
-
- - packet_device:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- hostnames: myserver
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: sjc1
- state: active
-
-# Pick an IPv4 address from a block allocated to your project.
-
- - community.general.packet_ip_subnet:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- hostname: myserver
- cidr: "147.75.201.78/32"
-
-# Release IP address 147.75.201.78
-
-- name: Unassign IP address from any device in your project
- hosts: localhost
- tasks:
- - community.general.packet_ip_subnet:
- project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
- cidr: "147.75.201.78/32"
- state: absent
-'''
-
-RETURN = '''
-changed:
- description: True if IP address assignments were altered in any way (created or removed).
- type: bool
- sample: True
- returned: success
-
-device_id:
- type: str
- description: UUID of the device associated with the specified IP address.
- returned: success
-
-subnet:
- description: Dict with data about the handled IP subnet.
- type: dict
- sample:
- address: 147.75.90.241
- address_family: 4
- assigned_to: { href : /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
- cidr: 31
- created_at: '2017-08-07T15:15:30Z'
- enabled: True
- gateway: 147.75.90.240
- href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
- id: 31eda960-0a16-4c0f-b196-f3dc4928529f
- manageable: True
- management: True
- netmask: 255.255.255.254
- network: 147.75.90.240
- public: True
- returned: success
-'''
-
-
-import uuid
-import re
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PACKET_SDK = True
-
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-
-NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
-HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
-PROJECT_MAX_DEVICES = 100
-
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-
-ALLOWED_STATES = ['absent', 'present']
-
-
-def is_valid_hostname(hostname):
- return re.match(HOSTNAME_RE, hostname) is not None
-
-
-def is_valid_uuid(myuuid):
- try:
- val = uuid.UUID(myuuid, version=4)
- except ValueError:
- return False
- return str(val) == myuuid
-
-
-def get_existing_devices(module, packet_conn):
- project_id = module.params.get('project_id')
- if not is_valid_uuid(project_id):
- raise Exception("Project ID {0} does not seem to be valid".format(project_id))
-
- per_page = module.params.get('device_count')
- return packet_conn.list_devices(
- project_id, params={'per_page': per_page})
-
-
-def get_specified_device_identifiers(module):
- if module.params.get('device_id'):
- _d_id = module.params.get('device_id')
- if not is_valid_uuid(_d_id):
- raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
- return {'device_id': _d_id, 'hostname': None}
- elif module.params.get('hostname'):
- _hn = module.params.get('hostname')
- if not is_valid_hostname(_hn):
- raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
- return {'hostname': _hn, 'device_id': None}
- else:
- return {'hostname': None, 'device_id': None}
-
-
-def parse_subnet_cidr(cidr):
- if "/" not in cidr:
- raise Exception("CIDR expression in wrong format, must be address/prefix_len")
- addr, prefixlen = cidr.split("/")
- try:
- prefixlen = int(prefixlen)
- except ValueError:
- raise("Wrong prefix length in CIDR expression {0}".format(cidr))
- return addr, prefixlen
-
-
-def act_on_assignment(target_state, module, packet_conn):
- return_dict = {'changed': False}
- specified_cidr = module.params.get("cidr")
- address, prefixlen = parse_subnet_cidr(specified_cidr)
-
- specified_identifier = get_specified_device_identifiers(module)
-
- if module.check_mode:
- return return_dict
-
- if (specified_identifier['hostname'] is None) and (
- specified_identifier['device_id'] is None):
- if target_state == 'absent':
- # The special case to release the IP from any assignment
- for d in get_existing_devices(module, packet_conn):
- for ia in d.ip_addresses:
- if address == ia['address'] and prefixlen == ia['cidr']:
- packet_conn.call_api(ia['href'], "DELETE")
- return_dict['changed'] = True
- return_dict['subnet'] = ia
- return_dict['device_id'] = d.id
- return return_dict
- raise Exception("If you assign an address, you must specify either "
- "target device ID or target unique hostname.")
-
- if specified_identifier['device_id'] is not None:
- device = packet_conn.get_device(specified_identifier['device_id'])
- else:
- all_devices = get_existing_devices(module, packet_conn)
- hn = specified_identifier['hostname']
- matching_devices = [d for d in all_devices if d.hostname == hn]
- if len(matching_devices) > 1:
- raise Exception("There are more than one devices matching given hostname {0}".format(hn))
- if len(matching_devices) == 0:
- raise Exception("There is no device matching given hostname {0}".format(hn))
- device = matching_devices[0]
-
- return_dict['device_id'] = device.id
- assignment_dicts = [i for i in device.ip_addresses
- if i['address'] == address and i['cidr'] == prefixlen]
- if len(assignment_dicts) > 1:
- raise Exception("IP address {0} is assigned more than once for device {1}".format(
- specified_cidr, device.hostname))
-
- if target_state == "absent":
- if len(assignment_dicts) == 1:
- packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
- return_dict['subnet'] = assignment_dicts[0]
- return_dict['changed'] = True
- elif target_state == "present":
- if len(assignment_dicts) == 0:
- new_assignment = packet_conn.call_api(
- "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
- return_dict['changed'] = True
- return_dict['subnet'] = new_assignment
- return return_dict
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- auth_token=dict(
- type='str',
- fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
- no_log=True
- ),
- device_id=dict(type='str'),
- hostname=dict(type='str'),
- project_id=dict(type='str'),
- device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
- cidr=dict(type='str', required=True, aliases=['name']),
- state=dict(choices=ALLOWED_STATES, default='present'),
- ),
- supports_check_mode=True,
- mutually_exclusive=[('hostname', 'device_id')],
- required_one_of=[['hostname', 'device_id', 'project_id']],
- required_by=dict(
- hostname=('project_id',),
- ),
- )
-
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable {0}, "
- "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- try:
- module.exit_json(**act_on_assignment(state, module, packet_conn))
- except Exception as e:
- module.fail_json(
- msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
-
-
-if __name__ == '__main__':
- main()
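
parse_subnet_cidr in the module above splits and validates the CIDR string by hand. For comparison, a sketch of the same helper on top of the standard-library ipaddress module, which validates both the address and the prefix length; this is an illustrative alternative, not the module's code:

# Illustrative stdlib-based alternative to parse_subnet_cidr.
import ipaddress

def parse_subnet_cidr(cidr):
    """Split '147.75.201.78/32' into ('147.75.201.78', 32), validating both parts."""
    if "/" not in cidr:
        raise Exception("CIDR expression in wrong format, must be address/prefix_len")
    try:
        iface = ipaddress.ip_interface(cidr)  # accepts IPv4 and IPv6
    except ValueError as e:
        raise Exception("Invalid CIDR expression {0}: {1}".format(cidr, e))
    return str(iface.ip), iface.network.prefixlen

assert parse_subnet_cidr("147.75.201.78/32") == ("147.75.201.78", 32)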
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
deleted file mode 100644
index c6502c6e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_project.py
+++ /dev/null
@@ -1,244 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Nurfet Becirevic
-# Copyright: (c) 2019, Tomas Karasek
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: packet_project
-
-short_description: Create/delete a project in Packet host.
-
-description:
- - Create/delete a project in Packet host.
- - API is documented at U(https://www.packet.com/developers/api/#projects).
-
-version_added: '0.2.0'
-
-author:
- - Tomas Karasek (@t0mk)
- - Nurfet Becirevic (@nurfet-becirevic)
-
-options:
- state:
- description:
- - Indicate desired state of the target.
- default: present
- choices: ['present', 'absent']
- type: str
-
- payment_method:
- description:
- - Payment method is name of one of the payment methods available to your user.
- - When blank, the API assumes the default payment method.
- type: str
-
- auth_token:
- description:
- - Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
-
- name:
- description:
- - Name for/of the project.
- type: str
-
- org_id:
- description:
- - UUID of the organization to create a project for.
- - When blank, the API assumes the default organization.
- type: str
-
- id:
- description:
- - UUID of the project which you want to remove.
- type: str
-
- custom_data:
- description:
- - Custom data about the project to create.
- type: str
-
-requirements:
- - "python >= 2.6"
- - "packet-python >= 1.40"
-
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass the api token in module param auth_token.
-
-- name: Create new project
- hosts: localhost
- tasks:
- community.general.packet_project:
- name: "new project"
-
-- name: Create new project within non-default organization
- hosts: localhost
- tasks:
- community.general.packet_project:
- name: "my org project"
- org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
-
-- name: Remove project by id
- hosts: localhost
- tasks:
- community.general.packet_project:
- state: absent
- id: eef49903-7a09-4ca1-af67-4087c29ab5b6
-
-- name: Create new project with non-default billing method
- hosts: localhost
- tasks:
- community.general.packet_project:
- name: "newer project"
- payment_method: "the other visa"
-'''
-
-RETURN = '''
-changed:
- description: True if a project was created or removed.
- type: bool
- sample: True
- returned: success
-
-name:
- description: Name of addressed project.
- type: str
- returned: success
-
-id:
- description: UUID of addressed project.
- type: str
- returned: success
-'''
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PACKET_SDK = True
-
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-
-def act_on_project(target_state, module, packet_conn):
- result_dict = {'changed': False}
- given_id = module.params.get('id')
- given_name = module.params.get('name')
- if given_id:
- matching_projects = [
- p for p in packet_conn.list_projects() if given_id == p.id]
- else:
- matching_projects = [
- p for p in packet_conn.list_projects() if given_name == p.name]
-
- if target_state == 'present':
- if len(matching_projects) == 0:
- org_id = module.params.get('org_id')
- custom_data = module.params.get('custom_data')
- payment_method = module.params.get('payment_method')
-
- if not org_id:
- params = {
- "name": given_name,
- "payment_method_id": payment_method,
- "customdata": custom_data
- }
- new_project_data = packet_conn.call_api("projects", "POST", params)
- new_project = packet.Project(new_project_data, packet_conn)
- else:
- new_project = packet_conn.create_organization_project(
- org_id=org_id,
- name=given_name,
- payment_method_id=payment_method,
- customdata=custom_data
- )
-
- result_dict['changed'] = True
- matching_projects.append(new_project)
-
- result_dict['name'] = matching_projects[0].name
- result_dict['id'] = matching_projects[0].id
- else:
- if len(matching_projects) > 1:
- _msg = ("More than projects matched for module call with state = absent: "
- "{0}".format(to_native(matching_projects)))
- module.fail_json(msg=_msg)
-
- if len(matching_projects) == 1:
- p = matching_projects[0]
- result_dict['name'] = p.name
- result_dict['id'] = p.id
- result_dict['changed'] = True
- try:
- p.delete()
- except Exception as e:
- _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
- p.name, p.id, to_native(e)))
- module.fail_json(msg=_msg)
- return result_dict
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(choices=['present', 'absent'], default='present'),
- auth_token=dict(
- type='str',
- fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
- no_log=True
- ),
- name=dict(type='str'),
- id=dict(type='str'),
- org_id=dict(type='str'),
- payment_method=dict(type='str'),
- custom_data=dict(type='str'),
- ),
- supports_check_mode=True,
- required_one_of=[("name", "id",)],
- mutually_exclusive=[
- ('name', 'id'),
- ]
- )
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable {0}, "
- "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- if state in ['present', 'absent']:
- if module.check_mode:
- module.exit_json(changed=False)
-
- try:
- module.exit_json(**act_on_project(state, module, packet_conn))
- except Exception as e:
- module.fail_json(
- msg="failed to set project state {0}: {1}".format(state, to_native(e)))
- else:
- module.fail_json(msg="{0} is not a valid state for this module".format(state))
-
-
-if __name__ == '__main__':
- main()
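
act_on_project above gets its idempotency from a single filter step: match existing projects on id (or name), create only when nothing matches, and report the first match either way. A reduced sketch of that create-if-missing pattern over plain dicts; the create callback is a hypothetical stand-in for the API call:

# Create-if-missing idempotency over a list of existing resources.
def ensure_present(existing, given_id=None, given_name=None, create=None):
    if given_id:
        matching = [p for p in existing if p['id'] == given_id]
    else:
        matching = [p for p in existing if p['name'] == given_name]
    changed = False
    if not matching:
        matching.append(create(given_name))  # only hit the API when needed
        changed = True
    return {'changed': changed, 'id': matching[0]['id'], 'name': matching[0]['name']}

projects = [{'id': 'abc', 'name': 'new project'}]
result = ensure_present(projects, given_name='new project',
                        create=lambda n: {'id': 'xyz', 'name': n})
assert result == {'changed': False, 'id': 'abc', 'name': 'new project'}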
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
deleted file mode 100644
index 4800718f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_sshkey.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright 2016 Tomas Karasek
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: packet_sshkey
-short_description: Create/delete an SSH key in Packet host.
-description:
- - Create/delete an SSH key in Packet host.
- - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
-author: "Tomas Karasek (@t0mk) "
-options:
- state:
- description:
- - Indicate desired state of the target.
- default: present
- choices: ['present', 'absent']
- type: str
- auth_token:
- description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
- label:
- description:
- - Label for the key. If you keep it empty, it will be read from the key string.
- type: str
- aliases: [name]
- id:
- description:
- - UUID of the key which you want to remove.
- type: str
- fingerprint:
- description:
- - Fingerprint of the key which you want to remove.
- type: str
- key:
- description:
- - Public Key string ({type} {base64 encoded key} {description}).
- type: str
- key_file:
- description:
- - File with the public key.
- type: path
-
-requirements:
- - "python >= 2.6"
- - packet-python
-
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass the api token in module param auth_token.
-
-- name: Create sshkey from string
- hosts: localhost
- tasks:
- community.general.packet_sshkey:
- key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
-
-- name: Create sshkey from file
- hosts: localhost
- tasks:
- community.general.packet_sshkey:
- label: key from file
- key_file: ~/ff.pub
-
-- name: Remove sshkey by id
- hosts: localhost
- tasks:
- community.general.packet_sshkey:
- state: absent
- id: eef49903-7a09-4ca1-af67-4087c29ab5b6
-'''
-
-RETURN = '''
-changed:
- description: True if an sshkey was created or removed.
- type: bool
- sample: True
- returned: always
-sshkeys:
- description: Information about sshkeys that were created/removed.
- type: list
- sample: [
- {
- "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
- "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
- "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
- "label": "mynewkey33"
- }
- ]
- returned: always
-''' # NOQA
-
-import os
-import uuid
-
-from ansible.module_utils.basic import AnsibleModule
-
-HAS_PACKET_SDK = True
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-
-def serialize_sshkey(sshkey):
- sshkey_data = {}
- copy_keys = ['id', 'key', 'label', 'fingerprint']
- for name in copy_keys:
- sshkey_data[name] = getattr(sshkey, name)
- return sshkey_data
-
-
-def is_valid_uuid(myuuid):
- try:
- val = uuid.UUID(myuuid, version=4)
- except ValueError:
- return False
- return str(val) == myuuid
-
-
-def load_key_string(key_str):
- ret_dict = {}
- key_str = key_str.strip()
- ret_dict['key'] = key_str
- cut_key = key_str.split()
- if len(cut_key) in [2, 3]:
- if len(cut_key) == 3:
- ret_dict['label'] = cut_key[2]
- else:
- raise Exception("Public key %s is in wrong format" % key_str)
- return ret_dict
-
-
-def get_sshkey_selector(module):
- key_id = module.params.get('id')
- if key_id:
- if not is_valid_uuid(key_id):
- raise Exception("sshkey ID %s is not valid UUID" % key_id)
- selecting_fields = ['label', 'fingerprint', 'id', 'key']
- select_dict = {}
- for f in selecting_fields:
- if module.params.get(f) is not None:
- select_dict[f] = module.params.get(f)
-
- if module.params.get('key_file'):
- with open(module.params.get('key_file')) as _file:
- loaded_key = load_key_string(_file.read())
- select_dict['key'] = loaded_key['key']
- if module.params.get('label') is None:
- if loaded_key.get('label'):
- select_dict['label'] = loaded_key['label']
-
- def selector(k):
- if 'key' in select_dict:
- # if key string is specified, compare only the key strings
- return k.key == select_dict['key']
- else:
- # if key string not specified, all the fields must match
- return all(select_dict[f] == getattr(k, f) for f in select_dict)
- return selector
-
-
-def act_on_sshkeys(target_state, module, packet_conn):
- selector = get_sshkey_selector(module)
- existing_sshkeys = packet_conn.list_ssh_keys()
- matching_sshkeys = list(filter(selector, existing_sshkeys))  # list() so it can be compared and appended to on Python 3
- changed = False
- if target_state == 'present':
- if matching_sshkeys == []:
- # there is no key matching the fields from module call
- # => create the key, label and
- newkey = {}
- if module.params.get('key_file'):
- with open(module.params.get('key_file')) as f:
- newkey = load_key_string(f.read())
- if module.params.get('key'):
- newkey = load_key_string(module.params.get('key'))
- if module.params.get('label'):
- newkey['label'] = module.params.get('label')
- for param in ('label', 'key'):
- if param not in newkey:
- _msg = ("If you want to ensure a key is present, you must "
- "supply both a label and a key string, either in "
- "module params, or in a key file. %s is missing"
- % param)
- raise Exception(_msg)
- matching_sshkeys = []
- new_key_response = packet_conn.create_ssh_key(
- newkey['label'], newkey['key'])
- changed = True
-
- matching_sshkeys.append(new_key_response)
- else:
- # state is 'absent' => delete matching keys
- for k in matching_sshkeys:
- try:
- k.delete()
- changed = True
- except Exception as e:
- _msg = ("while trying to remove sshkey %s, id %s %s, "
- "got error: %s" %
- (k.label, k.id, target_state, e))
- raise Exception(_msg)
-
- return {
- 'changed': changed,
- 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
- }
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(choices=['present', 'absent'], default='present'),
- auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
- no_log=True),
- label=dict(type='str', aliases=['name'], default=None),
- id=dict(type='str', default=None),
- fingerprint=dict(type='str', default=None),
- key=dict(type='str', default=None, no_log=True),
- key_file=dict(type='path', default=None),
- ),
- mutually_exclusive=[
- ('label', 'id'),
- ('label', 'fingerprint'),
- ('id', 'fingerprint'),
- ('key', 'fingerprint'),
- ('key', 'id'),
- ('key_file', 'key'),
- ]
- )
-
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable %s, "
- "the auth_token parameter is required" %
- PACKET_API_TOKEN_ENV_VAR)
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- if state in ['present', 'absent']:
- try:
- module.exit_json(**act_on_sshkeys(state, module, packet_conn))
- except Exception as e:
- module.fail_json(msg='failed to set sshkey state: %s' % str(e))
- else:
- module.fail_json(msg='%s is not a valid state for this module' % state)
-
-
-if __name__ == '__main__':
- main()
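
get_sshkey_selector above builds the matching rule once and returns it as a closure: if a key string was supplied it is authoritative, otherwise every supplied field must match. A self-contained sketch of that closure pattern over plain dicts:

# Selector-closure sketch: build the predicate once, apply it many times.
def make_selector(select_dict):
    def selector(key):
        if 'key' in select_dict:
            # a key string is authoritative: compare only the key material
            return key['key'] == select_dict['key']
        # otherwise every supplied field must match
        return all(select_dict[f] == key[f] for f in select_dict)
    return selector

keys = [{'label': 'laptop', 'key': 'ssh-ed25519 AAAA... user@host'},
        {'label': 'ci', 'key': 'ssh-ed25519 BBBB... ci@host'}]
match_label = make_selector({'label': 'ci'})
assert [k['label'] for k in keys if match_label(k)] == ['ci']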
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
deleted file mode 100644
index 97c1e749..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Nurfet Becirevic
-# Copyright: (c) 2017, Tomas Karasek
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: packet_volume
-
-short_description: Create/delete a volume in Packet host.
-
-description:
- - Create/delete a volume in Packet host.
- - API is documented at U(https://www.packet.com/developers/api/#volumes).
-
-version_added: '0.2.0'
-
-author:
- - Tomas Karasek (@t0mk)
- - Nurfet Becirevic (@nurfet-becirevic)
-
-options:
- state:
- description:
- - Desired state of the volume.
- default: present
- choices: ['present', 'absent']
- type: str
-
- project_id:
- description:
- - ID of project of the device.
- required: true
- type: str
-
- auth_token:
- description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
-
- name:
- description:
- - Selector for the API-generated name of the volume.
- type: str
-
- description:
- description:
- - User-defined description attribute for Packet volume.
- - "It is used used as idempotent identifier - if volume with given
- description exists, new one is not created."
- type: str
-
- id:
- description:
- - UUID of a volume.
- type: str
-
- plan:
- description:
- - storage_1 for standard tier, storage_2 for premium (performance) tier.
- - Tiers are described at U(https://www.packet.com/cloud/storage/).
- choices: ['storage_1', 'storage_2']
- default: 'storage_1'
- type: str
-
- facility:
- description:
- - Location of the volume.
- - Volumes can only be attached to device in the same location.
- type: str
-
- size:
- description:
- - Size of the volume in gigabytes.
- type: int
-
- locked:
- description:
- - Create new volume locked.
- type: bool
- default: False
-
- billing_cycle:
- description:
- - Billing cycle for new volume.
- choices: ['hourly', 'monthly']
- default: 'hourly'
- type: str
-
- snapshot_policy:
- description:
- - Snapshot policy for new volume.
- type: dict
-
- suboptions:
- snapshot_count:
- description:
- - How many snapshots to keep, a positive integer.
- required: True
- type: int
-
- snapshot_frequency:
- description:
- - Frequency of snapshots.
- required: True
- choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
- type: str
-
-requirements:
- - "python >= 2.6"
- - "packet-python >= 1.35"
-
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass the api token in module param auth_token.
-
-- hosts: localhost
- vars:
- volname: testvol123
- project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
-
- tasks:
- - name: Create volume
- community.general.packet_volume:
- description: "{{ volname }}"
- project_id: "{{ project_id }}"
- facility: 'ewr1'
- plan: 'storage_1'
- state: present
- size: 10
- snapshot_policy:
- snapshot_count: 10
- snapshot_frequency: 1day
- register: result_create
-
- - name: Delete volume
- community.general.packet_volume:
- id: "{{ result_create.id }}"
- project_id: "{{ project_id }}"
- state: absent
-'''
-
-RETURN = '''
-id:
- description: UUID of specified volume
- type: str
- returned: success
- sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
-name:
- description: The API-generated name of the volume resource.
- type: str
- returned: if volume is attached/detached to/from some device
- sample: "volume-a91dc506"
-description:
- description: The user-defined description of the volume resource.
- type: str
- returned: success
- sample: "Just another volume"
-'''
-
-import uuid
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PACKET_SDK = True
-
-
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-VOLUME_PLANS = ["storage_1", "storage_2"]
-VOLUME_STATES = ["present", "absent"]
-BILLING = ["hourly", "monthly"]
-
-
-def is_valid_uuid(myuuid):
- try:
- val = uuid.UUID(myuuid, version=4)
- except ValueError:
- return False
- return str(val) == myuuid
-
-
-def get_volume_selector(module):
- if module.params.get('id'):
- i = module.params.get('id')
- if not is_valid_uuid(i):
- raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
- return lambda v: v['id'] == i
- elif module.params.get('name'):
- n = module.params.get('name')
- return lambda v: v['name'] == n
- elif module.params.get('description'):
- d = module.params.get('description')
- return lambda v: v['description'] == d
-
-
-def get_or_fail(params, key):
- item = params.get(key)
- if item is None:
- raise Exception("{0} must be specified for new volume".format(key))
- return item
-
-
-def act_on_volume(target_state, module, packet_conn):
- return_dict = {'changed': False}
- s = get_volume_selector(module)
- project_id = module.params.get("project_id")
- api_method = "projects/{0}/storage".format(project_id)
- all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
- matching_volumes = [v for v in all_volumes if s(v)]
-
- if target_state == "present":
- if len(matching_volumes) == 0:
- params = {
- "description": get_or_fail(module.params, "description"),
- "size": get_or_fail(module.params, "size"),
- "plan": get_or_fail(module.params, "plan"),
- "facility": get_or_fail(module.params, "facility"),
- "locked": get_or_fail(module.params, "locked"),
- "billing_cycle": get_or_fail(module.params, "billing_cycle"),
- "snapshot_policies": module.params.get("snapshot_policy"),
- }
-
- new_volume_data = packet_conn.call_api(api_method, "POST", params)
- return_dict['changed'] = True
- for k in ['id', 'name', 'description']:
- return_dict[k] = new_volume_data[k]
-
- else:
- for k in ['id', 'name', 'description']:
- return_dict[k] = matching_volumes[0][k]
-
- else:
- if len(matching_volumes) > 1:
- _msg = ("More than one volume matches in module call for absent state: {0}".format(
- to_native(matching_volumes)))
- module.fail_json(msg=_msg)
-
- if len(matching_volumes) == 1:
- volume = matching_volumes[0]
- packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
- return_dict['changed'] = True
- for k in ['id', 'name', 'description']:
- return_dict[k] = volume[k]
-
- return return_dict
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- id=dict(type='str', default=None),
- description=dict(type="str", default=None),
- name=dict(type='str', default=None),
- state=dict(choices=VOLUME_STATES, default="present"),
- auth_token=dict(
- type='str',
- fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
- no_log=True
- ),
- project_id=dict(required=True),
- plan=dict(choices=VOLUME_PLANS, default="storage_1"),
- facility=dict(type="str"),
- size=dict(type="int"),
- locked=dict(type="bool", default=False),
- snapshot_policy=dict(type='dict', default=None),
- billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
- ),
- supports_check_mode=True,
- required_one_of=[("name", "id", "description")],
- mutually_exclusive=[
- ('name', 'id'),
- ('id', 'description'),
- ('name', 'description'),
- ]
- )
-
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable {0}, "
- "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- if state in VOLUME_STATES:
- if module.check_mode:
- module.exit_json(changed=False)
-
- try:
- module.exit_json(**act_on_volume(state, module, packet_conn))
- except Exception as e:
- module.fail_json(
- msg="failed to set volume state {0}: {1}".format(
- state, to_native(e)))
- else:
- module.fail_json(msg="{0} is not a valid state for this module".format(state))
-
-
-if __name__ == '__main__':
- main()
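
Two small idioms carry packet_volume: get_volume_selector picks the match predicate by precedence (id over name over description), and get_or_fail defers create-only requirements to create time instead of putting them in argument_spec. A sketch of the selector precedence with plain dicts; the trailing raise is an addition for the sketch, since the module relies on required_one_of instead:

# Volume selection precedence: id, then name, then description.
def get_volume_selector(params):
    if params.get('id'):
        return lambda v: v['id'] == params['id']
    if params.get('name'):
        return lambda v: v['name'] == params['name']
    if params.get('description'):
        return lambda v: v['description'] == params['description']
    raise Exception("one of id, name or description is required")

volumes = [{'id': 'v1', 'name': 'volume-a91dc506', 'description': 'testvol123'}]
pick = get_volume_selector({'description': 'testvol123'})
assert [v for v in volumes if pick(v)][0]['id'] == 'v1'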
diff --git a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
deleted file mode 100644
index 9044fbcf..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/packet/packet_volume_attachment.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Nurfet Becirevic
-# Copyright: (c) 2017, Tomas Karasek
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: packet_volume_attachment
-
-short_description: Attach/detach a volume to a device in the Packet host.
-
-description:
- - Attach/detach a volume to a device in the Packet host.
- - API is documented at U(https://www.packet.com/developers/api/volumes/).
- - "This module creates the attachment route in the Packet API. In order to discover
- the block devices on the server, you have to run the Attach Scripts,
- as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
-
-version_added: '0.2.0'
-
-author:
- - Tomas Karasek (@t0mk)
- - Nurfet Becirevic (@nurfet-becirevic)
-
-options:
- state:
- description:
- - Indicate desired state of the attachment.
- default: present
- choices: ['present', 'absent']
- type: str
-
- auth_token:
- description:
- - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
- type: str
-
- project_id:
- description:
- - UUID of the project to which the device and volume belong.
- type: str
- required: true
-
- volume:
- description:
- - Selector for the volume.
- - It can be a UUID, an API-generated volume name, or user-defined description string.
- - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
- type: str
- required: true
-
- device:
- description:
- - Selector for the device.
- - It can be a UUID of the device, or a hostname.
- - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
- type: str
-
-requirements:
- - "python >= 2.6"
- - "packet-python >= 1.35"
-
-'''
-
-EXAMPLES = '''
-# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
-# You can also pass the api token in module param auth_token.
-
-- hosts: localhost
-
- vars:
- volname: testvol
- devname: testdev
- project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
-
- tasks:
- - name: Create volume
- packet_volume:
- description: "{{ volname }}"
- project_id: "{{ project_id }}"
- facility: ewr1
- plan: storage_1
- state: present
- size: 10
- snapshot_policy:
- snapshot_count: 10
- snapshot_frequency: 1day
-
- - name: Create a device
- packet_device:
- project_id: "{{ project_id }}"
- hostnames: "{{ devname }}"
- operating_system: ubuntu_16_04
- plan: baremetal_0
- facility: ewr1
- state: present
-
- - name: Attach testvol to testdev
- community.general.packet_volume_attachment:
- project_id: "{{ project_id }}"
- volume: "{{ volname }}"
- device: "{{ devname }}"
-
- - name: Detach testvol from testdev
- community.general.packet_volume_attachment:
- project_id: "{{ project_id }}"
- volume: "{{ volname }}"
- device: "{{ devname }}"
- state: absent
-'''
-
-RETURN = '''
-volume_id:
- description: UUID of volume addressed by the module call.
- type: str
- returned: success
-
-device_id:
- description: UUID of device addressed by the module call.
- type: str
- returned: success
-'''
-
-import uuid
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible.module_utils.common.text.converters import to_native
-
-HAS_PACKET_SDK = True
-
-
-try:
- import packet
-except ImportError:
- HAS_PACKET_SDK = False
-
-
-PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
-
-STATES = ["present", "absent"]
-
-
-def is_valid_uuid(myuuid):
- try:
- val = uuid.UUID(myuuid, version=4)
- except ValueError:
- return False
- return str(val) == myuuid
-
-
-def get_volume_selector(spec):
- if is_valid_uuid(spec):
- return lambda v: v['id'] == spec
- else:
- return lambda v: v['name'] == spec or v['description'] == spec
-
-
-def get_device_selector(spec):
- if is_valid_uuid(spec):
- return lambda v: v['id'] == spec
- else:
- return lambda v: v['hostname'] == spec
-
-
-def do_attach(packet_conn, vol_id, dev_id):
- api_method = "storage/{0}/attachments".format(vol_id)
- packet_conn.call_api(
- api_method,
- params={"device_id": dev_id},
- type="POST")
-
-
-def do_detach(packet_conn, vol, dev_id=None):
- def dev_match(a):
- return (dev_id is None) or (a['device']['id'] == dev_id)
- for a in vol['attachments']:
- if dev_match(a):
- packet_conn.call_api(a['href'], type="DELETE")
-
-
-def validate_selected(l, resource_type, spec):
- if len(l) > 1:
- _msg = ("more than one {0} matches specification {1}: {2}".format(
- resource_type, spec, l))
- raise Exception(_msg)
- if len(l) == 0:
- _msg = "no {0} matches specification: {1}".format(resource_type, spec)
- raise Exception(_msg)
-
-
-def get_attached_dev_ids(volume_dict):
- if len(volume_dict['attachments']) == 0:
- return []
- else:
- return [a['device']['id'] for a in volume_dict['attachments']]
-
-
-def act_on_volume_attachment(target_state, module, packet_conn):
- return_dict = {'changed': False}
- volspec = module.params.get("volume")
- devspec = module.params.get("device")
- if devspec is None and target_state == 'present':
- raise Exception("If you want to attach a volume, you must specify a device.")
- project_id = module.params.get("project_id")
- volumes_api_method = "projects/{0}/storage".format(project_id)
- volumes = packet_conn.call_api(volumes_api_method,
- params={'include': 'facility,attachments.device'})['volumes']
- v_match = get_volume_selector(volspec)
- matching_volumes = [v for v in volumes if v_match(v)]
- validate_selected(matching_volumes, "volume", volspec)
- volume = matching_volumes[0]
- return_dict['volume_id'] = volume['id']
-
- device = None
- if devspec is not None:
- devices_api_method = "projects/{0}/devices".format(project_id)
- devices = packet_conn.call_api(devices_api_method)['devices']
- d_match = get_device_selector(devspec)
- matching_devices = [d for d in devices if d_match(d)]
- validate_selected(matching_devices, "device", devspec)
- device = matching_devices[0]
- return_dict['device_id'] = device['id']
-
- attached_device_ids = get_attached_dev_ids(volume)
-
- if target_state == "present":
- if len(attached_device_ids) == 0:
- do_attach(packet_conn, volume['id'], device['id'])
- return_dict['changed'] = True
- elif device['id'] not in attached_device_ids:
- # Don't reattach a volume that is attached to a different device.
- # Better to fail than to force-detach it on state == 'present'.
- raise Exception("volume {0} is already attached to device(s) {1}".format(
- volume['id'], attached_device_ids))
- else:
- if device is None:
- if len(attached_device_ids) > 0:
- do_detach(packet_conn, volume)
- return_dict['changed'] = True
- elif device['id'] in attached_device_ids:
- do_detach(packet_conn, volume, device['id'])
- return_dict['changed'] = True
-
- return return_dict
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(choices=STATES, default="present"),
- auth_token=dict(
- type='str',
- fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
- no_log=True
- ),
- volume=dict(type="str", required=True),
- project_id=dict(type="str", required=True),
- device=dict(type="str"),
- ),
- supports_check_mode=True,
- )
-
- if not HAS_PACKET_SDK:
- module.fail_json(msg='packet required for this module')
-
- if not module.params.get('auth_token'):
- _fail_msg = ("if Packet API token is not in environment variable {0}, "
- "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
- module.fail_json(msg=_fail_msg)
-
- auth_token = module.params.get('auth_token')
-
- packet_conn = packet.Manager(auth_token=auth_token)
-
- state = module.params.get('state')
-
- if state in STATES:
- if module.check_mode:
- module.exit_json(changed=False)
-
- try:
- module.exit_json(
- **act_on_volume_attachment(state, module, packet_conn))
- except Exception as e:
- module.fail_json(
- msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
- else:
- module.fail_json(msg="{0} is not a valid state for this module".format(state))
-
-
-if __name__ == '__main__':
- main()
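
validate_selected above encodes the rule both resource lookups share: a selector must match exactly one volume or device, and both zero and multiple matches are errors. The same helper in isolation, with dicts standing in for API results:

# Exactly-one-match validation for ambiguous selectors.
def validate_selected(matches, resource_type, spec):
    if len(matches) > 1:
        raise Exception("more than one {0} matches specification {1}: {2}"
                        .format(resource_type, spec, matches))
    if not matches:
        raise Exception("no {0} matches specification: {1}".format(resource_type, spec))

devices = [{'id': 'd1', 'hostname': 'testdev'}]
matching = [d for d in devices if d['hostname'] == 'testdev']
validate_selected(matching, "device", "testdev")  # passes: exactly one match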
diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
deleted file mode 100644
index 3a75778a..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks.py
+++ /dev/null
@@ -1,657 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks
-short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
-description:
- - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, it can optionally wait
- for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
-options:
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created virtual machines.
- type: bool
- default: 'yes'
- name:
- description:
- - The name of the virtual machine.
- type: str
- image:
- description:
- - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- datacenter:
- description:
- - The datacenter to provision this virtual machine.
- type: str
- cores:
- description:
- - The number of CPU cores to allocate to the virtual machine.
- default: 2
- type: int
- ram:
- description:
- - The amount of memory to allocate to the virtual machine.
- default: 2048
- type: int
- cpu_family:
- description:
- - The CPU family type to allocate to the virtual machine.
- type: str
- default: AMD_OPTERON
- choices: [ "AMD_OPTERON", "INTEL_XEON" ]
- volume_size:
- description:
- - The size in GB of the boot volume.
- type: int
- default: 10
- bus:
- description:
- - The bus type for the volume.
- type: str
- default: VIRTIO
- choices: [ "IDE", "VIRTIO"]
- instance_ids:
- description:
- - list of instance ids, currently only used when state='absent' to remove instances.
- type: list
- elements: str
- count:
- description:
- - The number of virtual machines to create.
- type: int
- default: 1
- location:
- description:
- - The datacenter location. Use only if you want to create the datacenter; otherwise this value is ignored.
- type: str
- default: us/las
- choices: [ "us/las", "de/fra", "de/fkb" ]
- assign_public_ip:
- description:
- - This will assign the machine to the public LAN. If no LAN with public Internet access exists, one is created.
- type: bool
- default: 'no'
- lan:
- description:
- - The ID of the LAN you wish to add the servers to.
- type: int
- default: 1
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- subscription_password:
- description:
- - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: 'yes'
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- remove_boot_volume:
- description:
- - remove the bootVolume of the virtual machine you're destroying.
- type: bool
- default: 'yes'
- state:
- description:
- - create or terminate instances
- - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
- type: str
- default: 'present'
- disk_type:
- description:
- - the type of disk to be allocated.
- type: str
- choices: [SSD, HDD]
- default: HDD
-
-requirements:
- - "profitbricks"
- - "python >= 2.6"
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Provisioning example
-- name: Create three servers and enumerate their names
- community.general.profitbricks:
- datacenter: Tardis One
- name: web%02d.stackpointcloud.com
- cores: 4
- ram: 2048
- volume_size: 50
- cpu_family: INTEL_XEON
- image: a3eae284-a2fe-11e4-b187-5f1f641608c8
- location: us/las
- count: 3
- assign_public_ip: true
-
-- name: Remove virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: absent
-
-- name: Start virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: running
-
-- name: Stop virtual machines
- community.general.profitbricks:
- datacenter: Tardis One
- instance_ids:
- - 'web001.stackpointcloud.com'
- - 'web002.stackpointcloud.com'
- - 'web003.stackpointcloud.com'
- wait_timeout: 500
- state: stopped
-'''
-
-import re
-import uuid
-import time
-import traceback
-
-HAS_PB_SDK = True
-
-try:
- from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request ' + msg + ' "' + str(
- promise['requestId']) + '" failed to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
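# Sketch (not from the original module): the deadline loop in
# _wait_for_completion above generalizes to any poll function. A hypothetical
# stdlib-only helper with the same shape - poll, sleep, re-check, fail at the
# deadline:
import time

def wait_until(poll, timeout, interval=5):
    """Poll until poll() returns something truthy or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = poll()          # e.g. checks a request's metadata status
        if result:
            return result
        time.sleep(interval)     # the module above also polls every 5 seconds
    raise Exception('Timed out after %s seconds' % timeout)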
-
-def _create_machine(module, profitbricks, datacenter, name):
- cores = module.params.get('cores')
- ram = module.params.get('ram')
- cpu_family = module.params.get('cpu_family')
- volume_size = module.params.get('volume_size')
- disk_type = module.params.get('disk_type')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- bus = module.params.get('bus')
- lan = module.params.get('lan')
- assign_public_ip = module.params.get('assign_public_ip')
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
- location = module.params.get('location')
- image = module.params.get('image')
- assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- if assign_public_ip:
- public_found = False
-
- lans = profitbricks.list_lans(datacenter)
- for lan in lans['items']:
- if lan['properties']['public']:
- public_found = True
- lan = lan['id']
-
- if not public_found:
- i = LAN(
- name='public',
- public=True)
-
- lan_response = profitbricks.create_lan(datacenter, i)
- _wait_for_completion(profitbricks, lan_response,
- wait_timeout, "_create_machine")
- lan = lan_response['id']
-
- v = Volume(
- name=str(uuid.uuid4()).replace('-', '')[:10],
- size=volume_size,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- bus=bus)
-
- n = NIC(
- lan=int(lan)
- )
-
- s = Server(
- name=name,
- ram=ram,
- cores=cores,
- cpu_family=cpu_family,
- create_volumes=[v],
- nics=[n],
- )
-
- try:
- create_server_response = profitbricks.create_server(
- datacenter_id=datacenter, server=s)
-
- _wait_for_completion(profitbricks, create_server_response,
- wait_timeout, "create_virtual_machine")
-
- server_response = profitbricks.get_server(
- datacenter_id=datacenter,
- server_id=create_server_response['id'],
- depth=3
- )
- except Exception as e:
- module.fail_json(msg="failed to create the new server: %s" % str(e))
- else:
- return server_response
-
-
-def _startstop_machine(module, profitbricks, datacenter_id, server_id):
- state = module.params.get('state')
-
- try:
- if state == 'running':
- profitbricks.start_server(datacenter_id, server_id)
- else:
- profitbricks.stop_server(datacenter_id, server_id)
-
- return True
- except Exception as e:
- module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
-
-
-def _create_datacenter(module, profitbricks):
- datacenter = module.params.get('datacenter')
- location = module.params.get('location')
- wait_timeout = module.params.get('wait_timeout')
-
- i = Datacenter(
- name=datacenter,
- location=location
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- return datacenter_response
- except Exception as e:
- module.fail_json(msg="failed to create the new server(s): %s" % str(e))
-
-
-def create_virtual_machine(module, profitbricks):
- """
- Create new virtual machine
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object
-
- Returns:
- True if a new virtual machine was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
- lan = module.params.get('lan')
- wait_timeout = module.params.get('wait_timeout')
- failed = True
- datacenter_found = False
-
- virtual_machines = []
- virtual_machine_ids = []
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if datacenter_id:
- datacenter_found = True
-
- if not datacenter_found:
- datacenter_response = _create_datacenter(module, profitbricks)
- datacenter_id = datacenter_response['id']
-
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "create_virtual_machine")
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if to_native(e).startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name]
-
- # Prefetch a list of servers for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for name in names:
- # Skip server creation if the server already exists.
- if _get_server_id(server_list, name):
- continue
-
- create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
- nics = profitbricks.list_nics(datacenter_id, create_response['id'])
- for n in nics['items']:
- if lan == n['properties']['lan']:
- create_response.update({'public_ip': n['properties']['ips'][0]})
-
- virtual_machines.append(create_response)
-
- failed = False
-
- results = {
- 'failed': failed,
- 'machines': virtual_machines,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in virtual_machines],
- }
- }
-
- return results
-
-
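# Sketch (not from the original module): the auto_increment branch above
# expands a printf-style name into a numbered series, appending a '%d'
# placeholder when the given name has none. Reduced to a hypothetical helper:
def expand_names(name, count, offset=1):
    try:
        name % 0                 # probe: does the name contain a placeholder?
    except TypeError:
        name = '%s%%d' % name    # no placeholder - append one
    return [name % n for n in range(offset, offset + count)]

# expand_names('web%02d.stackpointcloud.com', 2) yields
# ['web01.stackpointcloud.com', 'web02.stackpointcloud.com']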
-def remove_virtual_machine(module, profitbricks):
- """
- Removes a virtual machine.
-
- This will remove the virtual machine along with the bootVolume.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Not yet supported: handle deletion of attached data disks.
-
- Returns:
- True if a new virtual server was deleted, false otherwise
- """
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
- remove_boot_volume = module.params.get('remove_boot_volume')
- changed = False
-
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID for server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- # Remove the server's boot volume
- if remove_boot_volume:
- _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
-
- # Remove the server
- try:
- server_response = profitbricks.delete_server(datacenter_id, server_id)
- except Exception as e:
- module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
- else:
- changed = True
-
- return changed
-
-
-def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
- """
- Remove the boot volume from the server
- """
- try:
- server = profitbricks.get_server(datacenter_id, server_id)
- volume_id = server['properties']['bootVolume']['id']
- volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
- except Exception as e:
- module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
-
-
-def startstop_machine(module, profitbricks, state):
- """
- Starts or Stops a virtual machine.
-
- module : AnsibleModule object
- community.general.profitbricks: authenticated profitbricks object.
-
- Returns:
- True when the servers process the action successfully, false otherwise.
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
- changed = False
-
- datacenter = module.params.get('datacenter')
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for datacenter if referenced by name.
- datacenter_list = profitbricks.list_datacenters()
- datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
- if not datacenter_id:
- module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
-
- # Prefetch server list for later comparison.
- server_list = profitbricks.list_servers(datacenter_id)
- for instance in instance_ids:
- # Locate UUID of server if referenced by name.
- server_id = _get_server_id(server_list, instance)
- if server_id:
- _startstop_machine(module, profitbricks, datacenter_id, server_id)
- changed = True
-
- if wait:
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- matched_instances = []
- for res in profitbricks.list_servers(datacenter_id)['items']:
- if state == 'running':
- if res['properties']['vmState'].lower() == state:
- matched_instances.append(res)
- elif state == 'stopped':
- if res['properties']['vmState'].lower() == 'shutoff':
- matched_instances.append(res)
-
- if len(matched_instances) < len(instance_ids):
- time.sleep(5)
- else:
- break
-
- if wait_timeout <= time.time():
- # waiting took too long
- module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
-
- return changed
-
-
-def _get_datacenter_id(datacenters, identity):
- """
- Fetch and return datacenter UUID by datacenter name if found.
- """
- for datacenter in datacenters['items']:
- if identity in (datacenter['properties']['name'], datacenter['id']):
- return datacenter['id']
- return None
-
-
-def _get_server_id(servers, identity):
- """
- Fetch and return server UUID by server name if found.
- """
- for server in servers['items']:
- if identity in (server['properties']['name'], server['id']):
- return server['id']
- return None
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- name=dict(),
- image=dict(),
- cores=dict(type='int', default=2),
- ram=dict(type='int', default=2048),
- cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
- default='AMD_OPTERON'),
- volume_size=dict(type='int', default=10),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- image_password=dict(default=None, no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- lan=dict(type='int', default=1),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- location=dict(choices=LOCATIONS, default='us/las'),
- assign_public_ip=dict(type='bool', default=False),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- remove_boot_volume=dict(type='bool', default=True),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required ' +
- 'for terminating machines.')
-
- try:
- changed = remove_virtual_machine(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state in ('running', 'stopped'):
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for ' +
- 'running or stopping machines.')
- try:
- changed = startstop_machine(module, profitbricks, state)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
- if not module.params.get('image'):
- module.fail_json(msg='image parameter is required for new instance')
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is ' +
- 'required for new instance')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is ' +
- 'required for new instance')
-
- try:
- machine_dict_array = create_virtual_machine(module, profitbricks)
- module.exit_json(**machine_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
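
Aside: the `_get_datacenter_id()` and `_get_server_id()` helpers deleted above share one lookup pattern — the API returns `{'items': [...]}` collections, and callers may pass either a display name or a UUID. A minimal standalone sketch of that pattern (the sample payload below is hypothetical):

```python
def resolve_id(collection, identity):
    """Return the UUID of the item whose name or id equals identity."""
    for item in collection['items']:
        if identity in (item['properties']['name'], item['id']):
            return item['id']
    return None

# Hypothetical API payload shaped like profitbricks.list_servers() output.
servers = {'items': [
    {'id': 'a3eae284-a2fe-11e4-b187-5f1f641608c8',
     'properties': {'name': 'node001'}},
]}

assert resolve_id(servers, 'node001') == 'a3eae284-a2fe-11e4-b187-5f1f641608c8'
assert resolve_id(servers, 'missing') is None
```
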
diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
deleted file mode 100644
index 7897ffde..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_datacenter.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_datacenter
-short_description: Create or destroy a ProfitBricks Virtual Datacenter.
-description:
- - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
- on profitbricks >= 1.0.0.
-options:
- name:
- description:
- - The name of the virtual datacenter.
- type: str
- description:
- description:
- - The description of the virtual datacenter.
- type: str
- required: false
- location:
- description:
- - The datacenter location.
- type: str
- required: false
- default: us/las
- choices: [ "us/las", "de/fra", "de/fkb" ]
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the datacenter to be created before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Create or terminate datacenters.
- - "The available choices are: C(present), C(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create a datacenter
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
-
-- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
- community.general.profitbricks_datacenter:
- datacenter: Tardis One
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Datacenter
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-LOCATIONS = ['us/las',
- 'de/fra',
- 'de/fkb']
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request ' + msg + ' "' + str(
- promise['requestId']) + '" failed to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _remove_datacenter(module, profitbricks, datacenter):
- try:
- profitbricks.delete_datacenter(datacenter)
- except Exception as e:
- module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
-
-
-def create_datacenter(module, profitbricks):
- """
- Creates a Datacenter
-
- This will create a new Datacenter in the specified location.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if a new datacenter was created, false otherwise
- """
- name = module.params.get('name')
- location = module.params.get('location')
- description = module.params.get('description')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- i = Datacenter(
- name=name,
- location=location,
- description=description
- )
-
- try:
- datacenter_response = profitbricks.create_datacenter(datacenter=i)
-
- if wait:
- _wait_for_completion(profitbricks, datacenter_response,
- wait_timeout, "_create_datacenter")
-
- results = {
- 'datacenter_id': datacenter_response['id']
- }
-
- return results
-
- except Exception as e:
- module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
-
-
-def remove_datacenter(module, profitbricks):
- """
- Removes a Datacenter.
-
- This will remove a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the datacenter was deleted, false otherwise
- """
- name = module.params.get('name')
- changed = False
-
- if uuid_match.match(name):
- _remove_datacenter(module, profitbricks, name)
- changed = True
- else:
- datacenters = profitbricks.list_datacenters()
-
- for d in datacenters['items']:
- vdc = profitbricks.get_datacenter(d['id'])
-
- if name == vdc['properties']['name']:
- name = d['id']
- _remove_datacenter(module, profitbricks, name)
- changed = True
-
- return changed
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(),
- description=dict(),
- location=dict(choices=LOCATIONS, default='us/las'),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=600, type='int'),
- state=dict(default='present'), # @TODO add choices
- )
- )
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
-
- try:
- changed = remove_datacenter(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
- elif state == 'present':
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for a new datacenter')
- if not module.params.get('location'):
- module.fail_json(msg='location parameter is required for a new datacenter')
-
- try:
- datacenter_dict_array = create_datacenter(module, profitbricks)
- module.exit_json(**datacenter_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set datacenter state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
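
Each of the removed profitbricks modules duplicates the same `_wait_for_completion()` deadline loop. A generic sketch of that polling pattern, with `fetch_status` standing in for `profitbricks.get_request(..., status=True)`:

```python
import time

def wait_for_completion(fetch_status, timeout=600, interval=5):
    """Poll fetch_status() until it reports DONE, FAILED, or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        time.sleep(interval)
        status = fetch_status()  # stand-in for get_request(..., status=True)
        if status == 'DONE':
            return
        if status == 'FAILED':
            raise Exception('request failed to complete')
    raise Exception('timed out waiting for async operation')
```
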
diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
deleted file mode 100644
index 5d98e05e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_nic.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_nic
-short_description: Create or Remove a NIC.
-description:
- - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0.
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- required: true
- server:
- description:
- - The server name or ID.
- type: str
- required: true
- name:
- description:
- - The name or ID of the NIC. This is only required for delete operations, not for create.
- - If not specified, it defaults to a value based on UUID4.
- type: str
- lan:
- description:
- - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: true
- subscription_password:
- description:
- - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: true
- wait:
- description:
- - wait for the operation to complete before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource.
- - "The available choices are: C(present), C(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- lan: 2
- wait_timeout: 500
- state: present
-
-- name: Remove a NIC
- community.general.profitbricks_nic:
- datacenter: Tardis One
- server: node002
- name: 7341c2454f
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import uuid
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, NIC
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _make_default_name():
- return str(uuid.uuid4()).replace('-', '')[:10]
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request ' + msg + ' "' + str(
- promise['requestId']) + '" failed to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def create_nic(module, profitbricks):
- """
- Creates a NIC.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the NIC was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- lan = module.params.get('lan')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
- try:
- n = NIC(
- name=name,
- lan=lan
- )
-
- nic_response = profitbricks.create_nic(datacenter, server, n)
-
- if wait:
- _wait_for_completion(profitbricks, nic_response,
- wait_timeout, "create_nic")
-
- return nic_response
-
- except Exception as e:
- module.fail_json(msg="failed to create the NIC: %s" % str(e))
-
-
-def delete_nic(module, profitbricks):
- """
- Removes a NIC
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the NIC was removed, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- name = module.params.get('name')
- if name is None:
- name = _make_default_name()
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- server_found = False
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server_found = True
- server = s['id']
- break
-
- if not server_found:
- return False
-
- # Locate UUID for NIC
- nic_found = False
- if not (uuid_match.match(name)):
- nic_list = profitbricks.list_nics(datacenter, server)
- for n in nic_list['items']:
- if name == n['properties']['name']:
- nic_found = True
- name = n['id']
- break
-
- if not nic_found:
- return False
-
- try:
- nic_response = profitbricks.delete_nic(datacenter, server, name)
- return nic_response
- except Exception as e:
- module.fail_json(msg="failed to remove the NIC: %s" % str(e))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(required=True),
- server=dict(required=True),
- name=dict(),
- lan=dict(),
- subscription_user=dict(required=True),
- subscription_password=dict(required=True, no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- ),
- required_if=(
- ('state', 'absent', ['name']),
- ('state', 'present', ['lan']),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- changed = delete_nic(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
- elif state == 'present':
- try:
- nic_dict = create_nic(module, profitbricks)
- module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
- except Exception as e:
- module.fail_json(msg='failed to set nic state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
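
When no `name` is given on create, the removed NIC module derives one from UUID4 via `_make_default_name()`. A self-contained sketch of that ten-character derivation:

```python
import uuid

def make_default_name():
    # First ten characters of a dashless UUID4, as in _make_default_name().
    return str(uuid.uuid4()).replace('-', '')[:10]

name = make_default_name()
assert len(name) == 10
assert all(c in '0123456789abcdef' for c in name)
```
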
diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
deleted file mode 100644
index be1c18b5..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume.py
+++ /dev/null
@@ -1,432 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume
-short_description: Create or destroy a volume.
-description:
- - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0.
-options:
- datacenter:
- description:
- - The datacenter in which to create the volumes.
- type: str
- name:
- description:
- - The name of the volumes. You can enumerate the names using auto_increment.
- type: str
- size:
- description:
- - The size of the volume.
- type: int
- required: false
- default: 10
- bus:
- description:
- - The bus type.
- type: str
- required: false
- default: VIRTIO
- choices: [ "IDE", "VIRTIO"]
- image:
- description:
- - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
- type: str
- image_password:
- description:
- - Password set for the administrative user.
- type: str
- required: false
- ssh_keys:
- description:
- - Public SSH keys allowing access to the virtual machine.
- type: list
- elements: str
- required: false
- disk_type:
- description:
- - The disk type of the volume.
- type: str
- required: false
- default: HDD
- choices: [ "HDD", "SSD" ]
- licence_type:
- description:
- - The licence type for the volume. This is used when the image is non-standard.
- - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
- type: str
- required: false
- default: UNKNOWN
- count:
- description:
- - The number of volumes you wish to create.
- type: int
- required: false
- default: 1
- auto_increment:
- description:
- - Whether or not to increment a single number in the name for created volumes.
- default: yes
- type: bool
- instance_ids:
- description:
- - list of instance ids, currently only used when state='absent' to remove instances.
- type: list
- elements: str
- required: false
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the datacenter to be created before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Create or terminate volumes.
- - "The available choices are: C(present), C(absent)."
- type: str
- required: false
- default: 'present'
- server:
- description:
- - Server name to attach the volume to.
- type: str
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Create multiple volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- name: vol%02d
- count: 5
- auto_increment: yes
- wait_timeout: 500
- state: present
-
-- name: Remove Volumes
- community.general.profitbricks_volume:
- datacenter: Tardis One
- instance_ids:
- - 'vol01'
- - 'vol02'
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-import traceback
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService, Volume
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.common.text.converters import to_native
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request ' + msg + ' "' + str(
- promise['requestId']) + '" failed to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def _create_volume(module, profitbricks, datacenter, name):
- size = module.params.get('size')
- bus = module.params.get('bus')
- image = module.params.get('image')
- image_password = module.params.get('image_password')
- ssh_keys = module.params.get('ssh_keys')
- disk_type = module.params.get('disk_type')
- licence_type = module.params.get('licence_type')
- wait_timeout = module.params.get('wait_timeout')
- wait = module.params.get('wait')
-
- try:
- v = Volume(
- name=name,
- size=size,
- bus=bus,
- image=image,
- image_password=image_password,
- ssh_keys=ssh_keys,
- disk_type=disk_type,
- licence_type=licence_type
- )
-
- volume_response = profitbricks.create_volume(datacenter, v)
-
- if wait:
- _wait_for_completion(profitbricks, volume_response,
- wait_timeout, "_create_volume")
-
- except Exception as e:
- module.fail_json(msg="failed to create the volume: %s" % str(e))
-
- return volume_response
-
-
-def _delete_volume(module, profitbricks, datacenter, volume):
- try:
- profitbricks.delete_volume(datacenter, volume)
- except Exception as e:
- module.fail_json(msg="failed to remove the volume: %s" % str(e))
-
-
-def create_volume(module, profitbricks):
- """
- Creates a volume.
-
- This will create a volume in a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was created, false otherwise
- """
- datacenter = module.params.get('datacenter')
- name = module.params.get('name')
- auto_increment = module.params.get('auto_increment')
- count = module.params.get('count')
-
- datacenter_found = False
- failed = True
- volumes = []
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- datacenter_found = True
- break
-
- if not datacenter_found:
- module.fail_json(msg='datacenter could not be found.')
-
- if auto_increment:
- numbers = set()
- count_offset = 1
-
- try:
- name % 0
- except TypeError as e:
- if to_native(e).startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- number_range = xrange(count_offset, count_offset + count + len(numbers))
- available_numbers = list(set(number_range).difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- names = [name] * count
-
- for name in names:
- create_response = _create_volume(module, profitbricks, str(datacenter), name)
- volumes.append(create_response)
- _attach_volume(module, profitbricks, datacenter, create_response['id'])
- failed = False
-
- results = {
- 'failed': failed,
- 'volumes': volumes,
- 'action': 'create',
- 'instance_ids': {
- 'instances': [i['id'] for i in volumes],
- }
- }
-
- return results
-
-
-def delete_volume(module, profitbricks):
- """
- Removes a volume.
-
- This will remove a volume from a datacenter.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was removed, false otherwise
- """
- if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
- module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
-
- datacenter = module.params.get('datacenter')
- changed = False
- instance_ids = module.params.get('instance_ids')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- for n in instance_ids:
- if uuid_match.match(n):
- _delete_volume(module, profitbricks, datacenter, n)
- changed = True
- else:
- volumes = profitbricks.list_volumes(datacenter)
- for v in volumes['items']:
- if n == v['properties']['name']:
- volume_id = v['id']
- _delete_volume(module, profitbricks, datacenter, volume_id)
- changed = True
-
- return changed
-
-
-def _attach_volume(module, profitbricks, datacenter, volume):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- server = module.params.get('server')
-
- # Locate UUID for Server
- if server:
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- try:
- return profitbricks.attach_volume(datacenter, server, volume)
- except Exception as e:
- module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- name=dict(),
- size=dict(type='int', default=10),
- bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
- image=dict(),
- image_password=dict(no_log=True),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
- licence_type=dict(default='UNKNOWN'),
- count=dict(type='int', default=1),
- auto_increment=dict(type='bool', default=True),
- instance_ids=dict(type='list', elements='str', default=[]),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for removing volumes.')
-
- try:
- changed = delete_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
- elif state == 'present':
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required for new instance')
- if not module.params.get('name'):
- module.fail_json(msg='name parameter is required for new instance')
-
- try:
- volume_dict_array = create_volume(module, profitbricks)
- module.exit_json(**volume_dict_array)
- except Exception as e:
- module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
-
-
-if __name__ == '__main__':
- main()
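
The `auto_increment` branch in the removed `create_volume()` probes the name with `name % 0` and appends a `%d` placeholder when none is present. A compact sketch of that enumeration, using `str(e)` rather than the Python-2-only `e.message`:

```python
def enumerate_names(name, count):
    """Expand a printf-style name template into count enumerated names."""
    try:
        name % 0  # probe: does the template already contain a placeholder?
    except TypeError as e:
        if str(e).startswith('not all'):
            name = '%s%%d' % name  # no placeholder present: append one
        else:
            raise
    return [name % n for n in range(1, count + 1)]

assert enumerate_names('vol%02d', 3) == ['vol01', 'vol02', 'vol03']
assert enumerate_names('vol', 2) == ['vol1', 'vol2']
```
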
diff --git a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
deleted file mode 100644
index 1fb3f3c0..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/profitbricks/profitbricks_volume_attachments.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: profitbricks_volume_attachments
-short_description: Attach or detach a volume.
-description:
- - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
-options:
- datacenter:
- description:
- - The datacenter in which to operate.
- type: str
- server:
- description:
- - The name of the server to which you wish to attach or from which to detach the volume.
- type: str
- volume:
- description:
- - The volume name or ID.
- type: str
- subscription_user:
- description:
- - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
- type: str
- required: false
- subscription_password:
- description:
- - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
- type: str
- required: false
- wait:
- description:
- - wait for the operation to complete before returning
- required: false
- default: "yes"
- type: bool
- wait_timeout:
- description:
- - how long before wait gives up, in seconds
- type: int
- default: 600
- state:
- description:
- - Indicate desired state of the resource.
- - "The available choices are: C(present), C(absent)."
- type: str
- required: false
- default: 'present'
-
-requirements: [ "profitbricks" ]
-author: Matt Baldwin (@baldwinSPC)
-'''
-
-EXAMPLES = '''
-- name: Attach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: present
-
-- name: Detach a volume
- community.general.profitbricks_volume_attachments:
- datacenter: Tardis One
- server: node002
- volume: vol01
- wait_timeout: 500
- state: absent
-'''
-
-import re
-import time
-
-HAS_PB_SDK = True
-try:
- from profitbricks.client import ProfitBricksService
-except ImportError:
- HAS_PB_SDK = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-uuid_match = re.compile(
- r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
-
-
-def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
- if not promise:
- return
- wait_timeout = time.time() + wait_timeout
- while wait_timeout > time.time():
- time.sleep(5)
- operation_result = profitbricks.get_request(
- request_id=promise['requestId'],
- status=True)
-
- if operation_result['metadata']['status'] == "DONE":
- return
- elif operation_result['metadata']['status'] == "FAILED":
- raise Exception(
- 'Request ' + msg + ' "' + str(
- promise['requestId']) + '" failed to complete.')
-
- raise Exception(
- 'Timed out waiting for async operation ' + msg + ' "' + str(
- promise['requestId']
- ) + '" to complete.')
-
-
-def attach_volume(module, profitbricks):
- """
- Attaches a volume.
-
- This will attach a volume to the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was attached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.attach_volume(datacenter, server, volume)
-
-
-def detach_volume(module, profitbricks):
- """
- Detaches a volume.
-
- This will remove a volume from the server.
-
- module : AnsibleModule object
- profitbricks: authenticated profitbricks object.
-
- Returns:
- True if the volume was detached, false otherwise
- """
- datacenter = module.params.get('datacenter')
- server = module.params.get('server')
- volume = module.params.get('volume')
-
- # Locate UUID for Datacenter
- if not (uuid_match.match(datacenter)):
- datacenter_list = profitbricks.list_datacenters()
- for d in datacenter_list['items']:
- dc = profitbricks.get_datacenter(d['id'])
- if datacenter == dc['properties']['name']:
- datacenter = d['id']
- break
-
- # Locate UUID for Server
- if not (uuid_match.match(server)):
- server_list = profitbricks.list_servers(datacenter)
- for s in server_list['items']:
- if server == s['properties']['name']:
- server = s['id']
- break
-
- # Locate UUID for Volume
- if not (uuid_match.match(volume)):
- volume_list = profitbricks.list_volumes(datacenter)
- for v in volume_list['items']:
- if volume == v['properties']['name']:
- volume = v['id']
- break
-
- return profitbricks.detach_volume(datacenter, server, volume)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- datacenter=dict(),
- server=dict(),
- volume=dict(),
- subscription_user=dict(),
- subscription_password=dict(no_log=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(type='int', default=600),
- state=dict(default='present'),
- )
- )
-
- if not HAS_PB_SDK:
- module.fail_json(msg='profitbricks required for this module')
-
- if not module.params.get('subscription_user'):
- module.fail_json(msg='subscription_user parameter is required')
- if not module.params.get('subscription_password'):
- module.fail_json(msg='subscription_password parameter is required')
- if not module.params.get('datacenter'):
- module.fail_json(msg='datacenter parameter is required')
- if not module.params.get('server'):
- module.fail_json(msg='server parameter is required')
- if not module.params.get('volume'):
- module.fail_json(msg='volume parameter is required')
-
- subscription_user = module.params.get('subscription_user')
- subscription_password = module.params.get('subscription_password')
-
- profitbricks = ProfitBricksService(
- username=subscription_user,
- password=subscription_password)
-
- state = module.params.get('state')
-
- if state == 'absent':
- try:
- changed = detach_volume(module, profitbricks)
- module.exit_json(changed=changed)
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
- elif state == 'present':
- try:
- attach_volume(module, profitbricks)
- module.exit_json()
- except Exception as e:
- module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
-
-
-if __name__ == '__main__':
- main()
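
All of these modules decide whether an identifier is already a UUID with the same permissive regex; note that `\w` also matches underscores and non-hex word characters, so it is looser than a strict UUID check:

```python
import re

# The shared uuid_match pattern from the removed profitbricks modules.
uuid_match = re.compile(r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)

assert uuid_match.match('a3eae284-a2fe-11e4-b187-5f1f641608c8')
assert not uuid_match.match('vol01')
```
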
diff --git a/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
deleted file mode 100644
index d3b76337..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/pubnub/pubnub_blocks.py
+++ /dev/null
@@ -1,628 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
-# Frameworks
-# Copyright (C) 2016 PubNub Inc.
-# http://www.pubnub.com/
-# http://www.pubnub.com/terms
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: pubnub_blocks
-short_description: PubNub blocks management module.
-description:
- - "This module allows Ansible to interface with the PubNub BLOCKS
- infrastructure by providing the following operations: create / remove,
- start / stop and rename for blocks and create / modify / remove for event
- handlers"
-author:
- - PubNub (@pubnub)
- - Sergey Mamontov (@parfeon)
-requirements:
- - "python >= 2.7"
- - "pubnub_blocks_client >= 1.0"
-options:
- email:
- description:
- - Email from account for which new session should be started.
- - "Not required if C(cache) contains result of previous module call (in
- same play)."
- required: false
- type: str
- password:
- description:
- - Password which match to account to which specified C(email) belong.
- - "Not required if C(cache) contains result of previous module call (in
- same play)."
- required: false
- type: str
- cache:
- description: >
- If a single play uses the blocks management module several times, it is
- preferable to enable 'caching' by having the previous module call share
- its gathered artifacts, passing them to this parameter.
- required: false
- type: dict
- default: {}
- account:
- description:
- - "Name of PubNub account for from which C(application) will be used to
- manage blocks."
- - "User's account will be used if value not set or empty."
- type: str
- required: false
- application:
- description:
- - "Name of target PubNub application for which blocks configuration on
- specific C(keyset) will be done."
- type: str
- required: true
- keyset:
- description:
- - Name of application's keys set which is bound to managed blocks.
- type: str
- required: true
- state:
- description:
- - "Intended block state after event handlers creation / update process
- will be completed."
- required: false
- default: 'present'
- choices: ['started', 'stopped', 'present', 'absent']
- type: str
- name:
- description:
- - Name of the managed block, which will later be visible on admin.pubnub.com.
- required: true
- type: str
- description:
- description:
- - Short block description which will later be visible on
- admin.pubnub.com. Used only if the block doesn't exist; it won't change
- the description of an existing block.
- required: false
- type: str
- event_handlers:
- description:
- - "List of event handlers which should be updated for specified block
- C(name)."
- - "Each entry for new event handler should contain: C(name), C(src),
- C(channels), C(event). C(name) used as event handler name which can be
- used later to make changes to it."
- - C(src) is the full path to the file with the event handler code.
- - "C(channels) is the name of the channel from which the event handler
- awaits events."
- - "C(event) is the type of event that can trigger the event handler:
- I(js-before-publish), I(js-after-publish), I(js-after-presence)."
- - "Each entry for existing handlers should contain C(name) (so target
- handler can be identified). Rest parameters (C(src), C(channels) and
- C(event)) can be added if changes required for them."
- - "It is possible to rename event handler by adding C(changes) key to
- event handler payload and pass dictionary, which will contain single key
- C(name), where new name should be passed."
- - "To remove particular event handler it is possible to set C(state) for
- it to C(absent) and it will be removed."
- required: false
- default: []
- type: list
- elements: dict
- changes:
- description:
- - "List of fields which should be changed by block itself (doesn't
- affect any event handlers)."
- - "Possible options for change is: C(name)."
- required: false
- default: {}
- type: dict
- validate_certs:
- description:
- - "This key allow to try skip certificates check when performing REST API
- calls. Sometimes host may have issues with certificates on it and this
- will cause problems to call PubNub REST API."
- - If check should be ignored C(False) should be passed to this parameter.
- required: false
- default: true
- type: bool
-'''
-
-EXAMPLES = '''
-# Event handler create example.
-- name: Create single event handler
- community.general.pubnub_blocks:
- email: '{{ email }}'
- password: '{{ password }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- event_handlers:
- -
- src: '{{ path_to_handler_source }}'
- name: '{{ handler_name }}'
- event: 'js-before-publish'
- channels: '{{ handler_channel }}'
-
-# Change event handler trigger event type.
-- name: Change event handler 'event'
- community.general.pubnub_blocks:
- email: '{{ email }}'
- password: '{{ password }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- event_handlers:
- -
- name: '{{ handler_name }}'
- event: 'js-after-publish'
-
-# Stop block and event handlers.
-- name: Stopping block
- community.general.pubnub_blocks:
- email: '{{ email }}'
- password: '{{ password }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- state: stopped
-
-# Multiple module calls with cached result passing
-- name: Create '{{ block_name }}' block
- register: module_cache
- community.general.pubnub_blocks:
- email: '{{ email }}'
- password: '{{ password }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- state: present
-- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
- register: module_cache
- community.general.pubnub_blocks:
- cache: '{{ module_cache }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- state: present
- event_handlers:
- -
- src: '{{ path_to_handler_1_source }}'
- name: '{{ event_handler_1_name }}'
- channels: '{{ event_handler_1_channel }}'
- event: 'js-before-publish'
-- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
- register: module_cache
- community.general.pubnub_blocks:
- cache: '{{ module_cache }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- state: present
- event_handlers:
- -
- src: '{{ path_to_handler_2_source }}'
- name: '{{ event_handler_2_name }}'
- channels: '{{ event_handler_2_channel }}'
- event: 'js-before-publish'
-- name: Start '{{ block_name }}' block
- register: module_cache
- community.general.pubnub_blocks:
- cache: '{{ module_cache }}'
- application: '{{ app_name }}'
- keyset: '{{ keyset_name }}'
- name: '{{ block_name }}'
- state: started
-'''
-
-RETURN = '''
-module_cache:
- description: "Cached account information. In case if with single play module
- used few times it is better to pass cached data to next module calls to speed
- up process."
- type: dict
- returned: always
-'''
-import copy
-import os
-
-try:
- # Import PubNub BLOCKS client.
- from pubnub_blocks_client import User, Account, Owner, Application, Keyset
- from pubnub_blocks_client import Block, EventHandler
- from pubnub_blocks_client import exceptions
- HAS_PUBNUB_BLOCKS_CLIENT = True
-except ImportError:
- HAS_PUBNUB_BLOCKS_CLIENT = False
- User = None
- Account = None
- Owner = None
- Application = None
- Keyset = None
- Block = None
- EventHandler = None
- exceptions = None
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-
-
-def pubnub_user(module):
- """Create and configure user model if it possible.
-
- :type module: AnsibleModule
- :param module: Reference on module which contain module launch
- information and status report methods.
-
- :rtype: User
- :return: Reference on initialized and ready to use user, or 'None' if
- not all required information has been passed to the module.
- """
- user = None
- params = module.params
-
- if params.get('cache') and params['cache'].get('module_cache'):
- cache = params['cache']['module_cache']
- user = User()
- user.restore(cache=copy.deepcopy(cache['pnm_user']))
- elif params.get('email') and params.get('password'):
- user = User(email=params.get('email'), password=params.get('password'))
- else:
- err_msg = 'It looks like no account credentials have been passed, ' \
- 'or the \'cache\' field doesn\'t contain the result of a ' \
- 'previous module call.'
- module.fail_json(msg='Missing account credentials.',
- description=err_msg, changed=False)
-
- return user
-
-
-def pubnub_account(module, user):
- """Create and configure account if it is possible.
-
- :type module: AnsibleModule
- :param module: Reference on module which contain module launch
- information and status report methods.
- :type user: User
- :param user: Reference on authorized user for which one of accounts
- should be used during manipulations with block.
-
- :rtype: Account
- :return: Reference on initialized and ready to use account, or 'None' if
- not all required information has been passed to the module.
- """
- params = module.params
- if params.get('account'):
- account_name = params.get('account')
- account = user.account(name=params.get('account'))
- if account is None:
- err_frmt = 'It looks like there is no \'{0}\' account for the ' \
- 'authorized user. Please make sure the correct ' \
- 'name has been passed during module configuration.'
- module.fail_json(msg='Missing account.',
- description=err_frmt.format(account_name),
- changed=False)
- else:
- account = user.accounts()[0]
-
- return account
-
-
-def pubnub_application(module, account):
- """Retrieve reference on target application from account model.
-
- NOTE: If account authorization fails or there is no application
- with the specified name, the module will exit with an error.
- :type module: AnsibleModule
- :param module: Reference on module which contain module launch
- information and status report methods.
- :type account: Account
- :param account: Reference on PubNub account model from which reference
- on application should be fetched.
-
- :rtype: Application
- :return: Reference on initialized and ready to use application model.
- """
- application = None
- params = module.params
- try:
- application = account.application(params['application'])
- except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
- exc_msg = _failure_title_from_exception(exc)
- exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
- module.fail_json(msg=exc_msg, description=exc_descr,
- changed=account.changed,
- module_cache=dict(account))
-
- if application is None:
- err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
- 'correct application name has been passed. If the application ' \
- 'doesn\'t exist you can create it on admin.pubnub.com.'
- email = account.owner.email
- module.fail_json(msg=err_fmt.format(params['application'], email),
- changed=account.changed, module_cache=dict(account))
-
- return application
-
-
-def pubnub_keyset(module, account, application):
- """Retrieve reference on target keyset from application model.
-
- NOTE: If there is no keyset with the specified name, the module will
- exit with an error.
- :type module: AnsibleModule
- :param module: Reference on module which contain module launch
- information and status report methods.
- :type account: Account
- :param account: Reference on PubNub account model which will be
- used in case of error to export cached data.
- :type application: Application
- :param application: Reference on PubNub application model from which
- reference on keyset should be fetched.
-
- :rtype: Keyset
- :return: Reference on initialized and ready to use keyset model.
- """
- params = module.params
- keyset = application.keyset(params['keyset'])
- if keyset is None:
- err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \
- 'sure the correct keyset name has been passed. If the keyset ' \
- 'doesn\'t exist you can create it on admin.pubnub.com.'
- module.fail_json(msg=err_fmt.format(params['keyset'],
- application.name),
- changed=account.changed, module_cache=dict(account))
-
- return keyset
-
-
-def pubnub_block(module, account, keyset):
- """Retrieve reference on target keyset from application model.
-
- NOTE: If there is no block with the specified name and the module is
- configured to start/stop it, the module will exit with an error.
- :type module: AnsibleModule
- :param module: Reference on module which contain module launch
- information and status report methods.
- :type account: Account
- :param account: Reference on PubNub account model which will be used in
- case of error to export cached data.
- :type keyset: Keyset
- :param keyset: Reference on keyset model from which reference on block
- should be fetched.
-
- :rtype: Block
- :return: Reference on initialized and ready to use block model.
- """
- block = None
- params = module.params
- try:
- block = keyset.block(params['name'])
- except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
- exc_msg = _failure_title_from_exception(exc)
- exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
- module.fail_json(msg=exc_msg, description=exc_descr,
- changed=account.changed, module_cache=dict(account))
-
- # Report an error because the block doesn't exist but a start/stop
- # was requested at the same time.
- if block is None and params['state'] in ['started', 'stopped']:
- block_name = params.get('name')
- module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
- changed=account.changed, module_cache=dict(account))
-
- if block is None and params['state'] == 'present':
- block = Block(name=params.get('name'),
- description=params.get('description'))
- keyset.add_block(block)
-
- if block:
- # Update block information if required.
- if params.get('changes') and params['changes'].get('name'):
- block.name = params['changes']['name']
- if params.get('description'):
- block.description = params.get('description')
-
- return block
-
-
-def pubnub_event_handler(block, data):
- """Retrieve reference on target event handler from application model.
-
- :type block: Block
- :param block: Reference on block model from which reference on event
- handlers should be fetched.
- :type data: dict
- :param data: Reference on dictionary which contain information about
- event handler and whether it should be created or not.
-
- :rtype: EventHandler
- :return: Reference on initialized and ready to use event handler model.
- 'None' will be returned if there is no handler with the
- specified name and no request to create it.
- """
- event_handler = block.event_handler(data['name'])
-
- # Prepare payload for event handler update.
- changed_name = (data.pop('changes').get('name')
- if 'changes' in data else None)
- name = data.get('name') or changed_name
- channels = data.get('channels')
- event = data.get('event')
- code = _content_of_file_at_path(data.get('src'))
- state = data.get('state') or 'present'
-
- # Create event handler if required.
- if event_handler is None and state == 'present':
- event_handler = EventHandler(name=name, channels=channels, event=event,
- code=code)
- block.add_event_handler(event_handler)
-
- # Update event handler if required.
- if event_handler is not None and state == 'present':
- if name is not None:
- event_handler.name = name
- if channels is not None:
- event_handler.channels = channels
- if event is not None:
- event_handler.event = event
- if code is not None:
- event_handler.code = code
-
- return event_handler
-
-
-def _failure_title_from_exception(exception):
- """Compose human-readable title for module error title.
-
- Title will be based on status codes if they has been provided.
- :type exception: exceptions.GeneralPubNubError
- :param exception: Reference on exception for which title should be
- composed.
-
- :rtype: str
- :return: Reference on error title which should be shown on module
- failure.
- """
- title = 'General REST API access error.'
- if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
- title = 'Authorization error: missing credentials.'
- elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
- title = 'Authorization error: wrong credentials.'
- elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
- title = 'API access error: insufficient access rights.'
- elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
- title = 'API access error: time token expired.'
- elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
- title = 'Block create did fail: block with the same name already exists.'
- elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
- title = 'Unable to fetch list of blocks for keyset.'
- elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
- title = 'Block creation did fail.'
- elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
- title = 'Block update did fail.'
- elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
- title = 'Block removal did fail.'
- elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
- title = 'Block start/stop did fail.'
- elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
- title = 'Event handler creation did fail: missing fields.'
- elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
- title = 'Event handler creation did fail: event handler with the same name already exists.'
- elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
- title = 'Event handler creation did fail.'
- elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
- title = 'Event handler update did fail.'
- elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
- title = 'Event handler removal did fail.'
-
- return title
-
-
-def _content_of_file_at_path(path):
- """Read file content.
-
- Try to read the content of the file at the specified path.
- :type path: str
- :param path: Full path to the location of the file which should be read.
- :rtype: str
- :return: File content or 'None'
- """
- content = None
- if path and os.path.exists(path):
- with open(path, mode="rt") as opened_file:
- b_content = opened_file.read()
- try:
- content = to_text(b_content, errors='surrogate_or_strict')
- except UnicodeError:
- pass
-
- return content
-
-
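Note that the helper returns None both when the path does not exist and when the bytes cannot be decoded, so callers cannot distinguish the two cases. A minimal usage sketch (the path is hypothetical):

    code = _content_of_file_at_path('/tmp/handler.js')
    if code is None:
        # A missing file and undecodable content look the same here.
        print('No usable source file; handler code left unchanged.')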
-def main():
- fields = dict(
- email=dict(default='', required=False, type='str'),
- password=dict(default='', required=False, type='str', no_log=True),
- account=dict(default='', required=False, type='str'),
- application=dict(required=True, type='str'),
- keyset=dict(required=True, type='str', no_log=False),
- state=dict(default='present', type='str',
- choices=['started', 'stopped', 'present', 'absent']),
- name=dict(required=True, type='str'), description=dict(type='str'),
- event_handlers=dict(default=list(), type='list', elements='dict'),
- changes=dict(default=dict(), type='dict'),
- cache=dict(default=dict(), type='dict'),
- validate_certs=dict(default=True, type='bool'))
- module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
-
- if not HAS_PUBNUB_BLOCKS_CLIENT:
- module.fail_json(msg='pubnub_blocks_client required for this module.')
-
- params = module.params
-
- # Authorize user.
- user = pubnub_user(module)
- # Initialize PubNub account instance.
- account = pubnub_account(module, user=user)
- # Try to fetch the application with which the module should work.
- application = pubnub_application(module, account=account)
- # Try to fetch the keyset with which the module should work.
- keyset = pubnub_keyset(module, account=account, application=application)
- # Try to fetch the block with which the module should work.
- block = pubnub_block(module, account=account, keyset=keyset)
- is_new_block = block is not None and block.uid == -1
-
- # Check whether block should be removed or not.
- if block is not None and params['state'] == 'absent':
- keyset.remove_block(block)
- block = None
-
- if block is not None:
- # Update block information if required.
- if params.get('changes') and params['changes'].get('name'):
- block.name = params['changes']['name']
-
- # Process event changes to event handlers.
- for event_handler_data in params.get('event_handlers') or list():
- state = event_handler_data.get('state') or 'present'
- event_handler = pubnub_event_handler(data=event_handler_data,
- block=block)
- if state == 'absent' and event_handler:
- block.delete_event_handler(event_handler)
-
- # Update block operation state if required.
- if block and not is_new_block:
- if params['state'] == 'started':
- block.start()
- elif params['state'] == 'stopped':
- block.stop()
-
- # Save current account state.
- if not module.check_mode:
- try:
- account.save()
- except (exceptions.APIAccessError, exceptions.KeysetError,
- exceptions.BlockError, exceptions.EventHandlerError,
- exceptions.GeneralPubNubError) as exc:
- module_cache = dict(account)
- module_cache.update(dict(pnm_user=dict(user)))
- exc_msg = _failure_title_from_exception(exc)
- exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
- module.fail_json(msg=exc_msg, description=exc_descr,
- changed=account.changed,
- module_cache=module_cache)
-
- # Report module execution results.
- module_cache = dict(account)
- module_cache.update(dict(pnm_user=dict(user)))
- changed_will_change = account.changed or account.will_change
- module.exit_json(changed=changed_will_change, module_cache=module_cache)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
deleted file mode 100644
index 8c452d9d..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax.py
+++ /dev/null
@@ -1,892 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax
-short_description: create / delete an instance in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud instance and optionally
- waits for it to be 'running'.
-options:
- auto_increment:
- description:
- - Whether or not to increment a single number with the name of the
- created servers. Only applicable when used with the I(group) attribute
- or meta key.
- type: bool
- default: 'yes'
- boot_from_volume:
- description:
- - Whether or not to boot the instance from a Cloud Block Storage volume.
- If C(yes) and I(image) is specified a new volume will be created at
- boot time. I(boot_volume_size) is required with I(image) to create a
- new volume at boot time.
- type: bool
- default: 'no'
- boot_volume:
- type: str
- description:
- - Cloud Block Storage ID or Name to use as the boot volume of the
- instance
- boot_volume_size:
- type: int
- description:
- - Size of the volume to create in Gigabytes. This is only required with
- I(image) and I(boot_from_volume).
- default: 100
- boot_volume_terminate:
- description:
- - Whether the I(boot_volume) or newly created volume from I(image) will
- be terminated when the server is terminated
- type: bool
- default: 'no'
- config_drive:
- description:
- - Attach read-only configuration drive to server as label config-2
- type: bool
- default: 'no'
- count:
- type: int
- description:
- - number of instances to launch
- default: 1
- count_offset:
- type: int
- description:
- number to start the count at
- default: 1
- disk_config:
- type: str
- description:
- - Disk partitioning strategy
- - If not specified it will assume the value C(auto).
- choices:
- - auto
- - manual
- exact_count:
- description:
- - Explicitly ensure an exact count of instances, used with
- state=active/present. If specified as C(yes) and I(count) is less than
- the servers matched, servers will be deleted to match the count. If
- the number of matched servers is fewer than specified in I(count)
- additional servers will be added.
- type: bool
- default: 'no'
- extra_client_args:
- type: dict
- description:
- - A hash of key/value pairs to be used when creating the cloudservers
- client. This is considered an advanced option, use it wisely and
- with caution.
- extra_create_args:
- type: dict
- description:
- - A hash of key/value pairs to be used when creating a new server.
- This is considered an advanced option, use it wisely and with caution.
- files:
- type: dict
- description:
- - Files to insert into the instance. remotefilename:localcontent
- flavor:
- type: str
- description:
- - flavor to use for the instance
- group:
- type: str
- description:
- host group to assign to the server; also used for idempotent
- operations to ensure a specific number of instances
- image:
- type: str
- description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name).
- With I(boot_from_volume), a Cloud Block Storage volume will be created
- with this image
- instance_ids:
- type: list
- elements: str
- description:
- - list of instance ids, currently only used when state='absent' to
- remove instances
- key_name:
- type: str
- description:
- - key pair to use on the instance
- aliases:
- - keypair
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the instance
- name:
- type: str
- description:
- - Name to give the instance
- networks:
- type: list
- elements: str
- description:
- - The network to attach to the instances. If specified, you must include
- ALL networks including the public and private interfaces. Can be C(id)
- or C(label).
- default:
- - public
- - private
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- user_data:
- type: str
- description:
- Data to be uploaded to the server's config drive. This option implies
- I(config_drive). Can be a file path or a string
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Jesse Keating (@omgjlk)"
- - "Matt Martz (@sivel)"
-notes:
- - I(exact_count) can be "destructive" if the number of running servers in
- the I(group) is larger than that specified in I(count). In such a case, the
- I(state) is effectively set to C(absent) and the extra servers are deleted.
- In the case of deletion, the returned data structure will have C(action)
- set to C(delete), and the oldest servers in the group will be deleted.
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Cloud Server
- hosts: local
- gather_facts: False
- tasks:
- - name: Server build request
- local_action:
- module: rax
- credentials: ~/.raxpub
- name: rax-test1
- flavor: 5
- image: b11d9567-e412-4255-96b9-bd63ab23bcfe
- key_name: my_rackspace_key
- files:
- /root/test.txt: /home/localuser/test.txt
- wait: yes
- state: present
- networks:
- - private
- - public
- register: rax
-
-- name: Build an exact count of cloud servers with incremented names
- hosts: local
- gather_facts: False
- tasks:
- - name: Server build requests
- local_action:
- module: rax
- credentials: ~/.raxpub
- name: test%03d.example.org
- flavor: performance1-1
- image: ubuntu-1204-lts-precise-pangolin
- state: present
- count: 10
- count_offset: 10
- exact_count: yes
- group: test
- wait: yes
- register: rax
-'''
-
-import json
-import os
-import re
-import time
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
- rax_find_image, rax_find_network, rax_find_volume,
- rax_required_together, rax_to_dict, setup_rax_module)
-from ansible.module_utils.six.moves import xrange
-from ansible.module_utils.six import string_types
-
-
-def rax_find_server_image(module, server, image, boot_volume):
- if not image and boot_volume:
- vol = rax_find_bootable_volume(module, pyrax, server,
- exit=False)
- if not vol:
- return None
- volume_image_metadata = vol.volume_image_metadata
- vol_image_id = volume_image_metadata.get('image_id')
- if vol_image_id:
- server_image = rax_find_image(module, pyrax,
- vol_image_id, exit=False)
- if server_image:
- server.image = dict(id=server_image)
-
- # Match image IDs taking care of boot from volume
- if image and not server.image:
- vol = rax_find_bootable_volume(module, pyrax, server)
- volume_image_metadata = vol.volume_image_metadata
- vol_image_id = volume_image_metadata.get('image_id')
- if not vol_image_id:
- return None
- server_image = rax_find_image(module, pyrax,
- vol_image_id, exit=False)
- if image != server_image:
- return None
-
- server.image = dict(id=server_image)
- elif image and server.image['id'] != image:
- return None
-
- return server.image
-
-
-def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
- files=None, wait=True, wait_timeout=300, disk_config=None,
- group=None, nics=None, extra_create_args=None, user_data=None,
- config_drive=False, existing=None, block_device_mapping_v2=None):
- names = [] if names is None else names
- meta = {} if meta is None else meta
- files = {} if files is None else files
- nics = [] if nics is None else nics
- extra_create_args = {} if extra_create_args is None else extra_create_args
- existing = [] if existing is None else existing
- block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
-
- cs = pyrax.cloudservers
- changed = False
-
- if user_data:
- config_drive = True
-
- if user_data and os.path.isfile(os.path.expanduser(user_data)):
- try:
- user_data = os.path.expanduser(user_data)
- f = open(user_data)
- user_data = f.read()
- f.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % user_data)
-
- # Handle the file contents
- for rpath in files.keys():
- lpath = os.path.expanduser(files[rpath])
- try:
- fileobj = open(lpath, 'r')
- files[rpath] = fileobj.read()
- fileobj.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % lpath)
- try:
- servers = []
- bdmv2 = block_device_mapping_v2
- for name in names:
- servers.append(cs.servers.create(name=name, image=image,
- flavor=flavor, meta=meta,
- key_name=key_name,
- files=files, nics=nics,
- disk_config=disk_config,
- config_drive=config_drive,
- userdata=user_data,
- block_device_mapping_v2=bdmv2,
- **extra_create_args))
- except Exception as e:
- msg = str(e) or repr(e)
- module.fail_json(msg=msg)
- else:
- changed = True
-
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- for server in servers:
- try:
- server.get()
- except Exception:
- server.status = 'ERROR'
-
- if all(s.status in FINAL_STATUSES for s in servers):
- break
- time.sleep(5)
-
- success = []
- error = []
- timeout = []
- for server in servers:
- try:
- server.get()
- except Exception:
- server.status = 'ERROR'
- instance = rax_to_dict(server, 'server')
- if server.status == 'ACTIVE' or not wait:
- success.append(instance)
- elif server.status == 'ERROR':
- error.append(instance)
- elif wait:
- timeout.append(instance)
-
- untouched = [rax_to_dict(s, 'server') for s in existing]
- instances = success + untouched
-
- results = {
- 'changed': changed,
- 'action': 'create',
- 'instances': instances,
- 'success': success,
- 'error': error,
- 'timeout': timeout,
- 'instance_ids': {
- 'instances': [i['id'] for i in instances],
- 'success': [i['id'] for i in success],
- 'error': [i['id'] for i in error],
- 'timeout': [i['id'] for i in timeout]
- }
- }
-
- if timeout:
- results['msg'] = 'Timeout waiting for all servers to build'
- elif error:
- results['msg'] = 'Failed to build all servers'
-
- if 'msg' in results:
- module.fail_json(**results)
- else:
- module.exit_json(**results)
-
-
-def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
- instance_ids = [] if instance_ids is None else instance_ids
- kept = [] if kept is None else kept
-
- cs = pyrax.cloudservers
-
- changed = False
- instances = {}
- servers = []
-
- for instance_id in instance_ids:
- servers.append(cs.servers.get(instance_id))
-
- for server in servers:
- try:
- server.delete()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- changed = True
-
- instance = rax_to_dict(server, 'server')
- instances[instance['id']] = instance
-
- # If requested, wait for server deletion
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- for server in servers:
- instance_id = server.id
- try:
- server.get()
- except Exception:
- instances[instance_id]['status'] = 'DELETED'
- instances[instance_id]['rax_status'] = 'DELETED'
-
- if all(s['status'] in ('', 'DELETED', 'ERROR')
- for s in instances.values()):
- break
-
- time.sleep(5)
-
- timeout = [s for s in instances.values()
- if s['status'] not in ('', 'DELETED', 'ERROR')]
- error = [s for s in instances.values() if s['status'] == 'ERROR']
- success = [s for s in instances.values()
- if s['status'] in ('', 'DELETED')]
-
- instances = [rax_to_dict(s, 'server') for s in kept]
-
- results = {
- 'changed': changed,
- 'action': 'delete',
- 'instances': instances,
- 'success': success,
- 'error': error,
- 'timeout': timeout,
- 'instance_ids': {
- 'instances': [i['id'] for i in instances],
- 'success': [i['id'] for i in success],
- 'error': [i['id'] for i in error],
- 'timeout': [i['id'] for i in timeout]
- }
- }
-
- if timeout:
- results['msg'] = 'Timeout waiting for all servers to delete'
- elif error:
- results['msg'] = 'Failed to delete all servers'
-
- if 'msg' in results:
- module.fail_json(**results)
- else:
- module.exit_json(**results)
-
-
-def cloudservers(module, state=None, name=None, flavor=None, image=None,
- meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
- disk_config=None, count=1, group=None, instance_ids=None,
- exact_count=False, networks=None, count_offset=0,
- auto_increment=False, extra_create_args=None, user_data=None,
- config_drive=False, boot_from_volume=False,
- boot_volume=None, boot_volume_size=None,
- boot_volume_terminate=False):
- meta = {} if meta is None else meta
- files = {} if files is None else files
- instance_ids = [] if instance_ids is None else instance_ids
- networks = [] if networks is None else networks
- extra_create_args = {} if extra_create_args is None else extra_create_args
-
- cs = pyrax.cloudservers
- cnw = pyrax.cloud_networks
- if not cnw:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present' or (state == 'absent' and instance_ids is None):
- if not boot_from_volume and not boot_volume and not image:
- module.fail_json(msg='image is required for the "rax" module')
-
- for arg, value in dict(name=name, flavor=flavor).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax" module' %
- arg)
-
- if boot_from_volume and not image and not boot_volume:
- module.fail_json(msg='image or boot_volume is required for the '
- '"rax" module with boot_from_volume')
-
- if boot_from_volume and image and not boot_volume_size:
- module.fail_json(msg='boot_volume_size is required for the "rax" '
- 'module with boot_from_volume and image')
-
- if boot_from_volume and image and boot_volume:
- image = None
-
- servers = []
-
- # Add the group meta key
- if group and 'group' not in meta:
- meta['group'] = group
- elif 'group' in meta and group is None:
- group = meta['group']
-
- # Normalize and ensure all metadata values are strings
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- # When using state=absent with group, the absent block won't match the
- # names properly. Use the exact_count functionality to decrease the count
- # to the desired level
- was_absent = False
- if group is not None and state == 'absent':
- exact_count = True
- state = 'present'
- was_absent = True
-
- if image:
- image = rax_find_image(module, pyrax, image)
-
- nics = []
- if networks:
- for network in networks:
- nics.extend(rax_find_network(module, pyrax, network))
-
- # act on the state
- if state == 'present':
- # Idempotently ensure a specific count of servers
- if exact_count is not False:
- # See if we can find servers that match our options
- if group is None:
- module.fail_json(msg='"group" must be provided when using '
- '"exact_count"')
-
- if auto_increment:
- numbers = set()
-
- # See if the name is a printf-like string; if not, append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if str(e).startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=str(e))
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset, count_offset + count)
- available_numbers = list(set(number_range)
- .difference(numbers))
- else: # Not auto incrementing
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- # available_numbers not needed here, we inspect auto_increment
- # again later
-
- # If state was absent but the count was changed,
- # assume we only wanted to remove that number of instances
- if was_absent:
- diff = len(servers) - count
- if diff < 0:
- count = 0
- else:
- count = diff
-
- if len(servers) > count:
- # We have more servers than we need, set state='absent'
- # and delete the extras, this should delete the oldest
- state = 'absent'
- kept = servers[:count]
- del servers[:count]
- instance_ids = []
- for server in servers:
- instance_ids.append(server.id)
- delete(module, instance_ids=instance_ids, wait=wait,
- wait_timeout=wait_timeout, kept=kept)
- elif len(servers) < count:
- # we have fewer servers than we need
- if auto_increment:
- # auto incrementing server numbers
- names = []
- name_slice = count - len(servers)
- numbers_to_use = available_numbers[:name_slice]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # We are not auto incrementing server numbers,
- # create a list of 'name' that matches how many we need
- names = [name] * (count - len(servers))
- else:
- # we have the right number of servers, just return info
- # about all of the matched servers
- instances = []
- instance_ids = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
- instance_ids.append(server.id)
- module.exit_json(changed=False, action=None,
- instances=instances,
- success=[], error=[], timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
- else: # not called with exact_count=True
- if group is not None:
- if auto_increment:
- # we are auto incrementing server numbers, but not with
- # exact_count
- numbers = set()
-
- # See if the name is a printf-like string; if not, append
- # %d to the end
- try:
- name % 0
- except TypeError as e:
- if str(e).startswith('not all'):
- name = '%s%%d' % name
- else:
- module.fail_json(msg=str(e))
-
- # regex pattern to match printf formatting
- pattern = re.sub(r'%\d*[sd]', r'(\\d+)', name)
- for server in cs.servers.list():
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
- if server.metadata.get('group') == group:
- servers.append(server)
- match = re.search(pattern, server.name)
- if match:
- number = int(match.group(1))
- numbers.add(number)
-
- number_range = xrange(count_offset,
- count_offset + count + len(numbers))
- available_numbers = list(set(number_range)
- .difference(numbers))
- names = []
- numbers_to_use = available_numbers[:count]
- for number in numbers_to_use:
- names.append(name % number)
- else:
- # Not auto incrementing
- names = [name] * count
- else:
- # No group was specified, and not using exact_count
- # Perform more simplistic matching
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- servers = []
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non-matching metadata
- if server.metadata != meta:
- continue
- servers.append(server)
-
- if len(servers) >= count:
- # We have more servers than were requested, don't do
- # anything. Not running with exact_count=True, so we assume
- # more is OK
- instances = []
- for server in servers:
- instances.append(rax_to_dict(server, 'server'))
-
- instance_ids = [i['id'] for i in instances]
- module.exit_json(changed=False, action=None,
- instances=instances, success=[], error=[],
- timeout=[],
- instance_ids={'instances': instance_ids,
- 'success': [], 'error': [],
- 'timeout': []})
-
- # We need more servers to reach our target; create names for
- # them. We aren't performing auto_increment here
- names = [name] * (count - len(servers))
-
- block_device_mapping_v2 = []
- if boot_from_volume:
- mapping = {
- 'boot_index': '0',
- 'delete_on_termination': boot_volume_terminate,
- 'destination_type': 'volume',
- }
- if image:
- mapping.update({
- 'uuid': image,
- 'source_type': 'image',
- 'volume_size': boot_volume_size,
- })
- image = None
- elif boot_volume:
- volume = rax_find_volume(module, pyrax, boot_volume)
- mapping.update({
- 'uuid': pyrax.utils.get_id(volume),
- 'source_type': 'volume',
- })
- block_device_mapping_v2.append(mapping)
-
- create(module, names=names, flavor=flavor, image=image,
- meta=meta, key_name=key_name, files=files, wait=wait,
- wait_timeout=wait_timeout, disk_config=disk_config, group=group,
- nics=nics, extra_create_args=extra_create_args,
- user_data=user_data, config_drive=config_drive,
- existing=servers,
- block_device_mapping_v2=block_device_mapping_v2)
-
- elif state == 'absent':
- if instance_ids is None:
- # We weren't given an explicit list of server IDs to delete
- # Let's match instead
- search_opts = {
- 'name': '^%s$' % name,
- 'flavor': flavor
- }
- for server in cs.servers.list(search_opts=search_opts):
- # Ignore DELETED servers
- if server.status == 'DELETED':
- continue
-
- if not rax_find_server_image(module, server, image,
- boot_volume):
- continue
-
- # Ignore servers with non-matching metadata
- if meta != server.metadata:
- continue
-
- servers.append(server)
-
- # Build a list of server IDs to delete
- instance_ids = []
- for server in servers:
- if len(instance_ids) < count:
- instance_ids.append(server.id)
- else:
- break
-
- if not instance_ids:
- # No server IDs were matched for deletion, or no IDs were
- # explicitly provided, just exit and don't do anything
- module.exit_json(changed=False, action=None, instances=[],
- success=[], error=[], timeout=[],
- instance_ids={'instances': [],
- 'success': [], 'error': [],
- 'timeout': []})
-
- delete(module, instance_ids=instance_ids, wait=wait,
- wait_timeout=wait_timeout)
-
-
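The auto_increment branches above hinge on turning a printf-style name into a regex to discover which numbers are already taken, then formatting the free numbers back into names. The same round trip in isolation (the names and number window are hypothetical):

    import re

    name = 'test%03d.example.org'
    # '%03d' becomes a capturing group, yielding 'test(\d+).example.org'.
    pattern = re.sub(r'%\d*[sd]', r'(\\d+)', name)

    taken = set()
    for existing in ('test010.example.org', 'test012.example.org'):
        match = re.search(pattern, existing)
        if match:
            taken.add(int(match.group(1)))

    # First free numbers in the requested window [10, 20):
    free = sorted(set(range(10, 20)) - taken)
    print([name % n for n in free[:3]])
    # -> ['test011.example.org', 'test013.example.org', 'test014.example.org']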
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- auto_increment=dict(default=True, type='bool'),
- boot_from_volume=dict(default=False, type='bool'),
- boot_volume=dict(type='str'),
- boot_volume_size=dict(type='int', default=100),
- boot_volume_terminate=dict(type='bool', default=False),
- config_drive=dict(default=False, type='bool'),
- count=dict(default=1, type='int'),
- count_offset=dict(default=1, type='int'),
- disk_config=dict(choices=['auto', 'manual']),
- exact_count=dict(default=False, type='bool'),
- extra_client_args=dict(type='dict', default={}),
- extra_create_args=dict(type='dict', default={}),
- files=dict(type='dict', default={}),
- flavor=dict(),
- group=dict(),
- image=dict(),
- instance_ids=dict(type='list', elements='str'),
- key_name=dict(aliases=['keypair']),
- meta=dict(type='dict', default={}),
- name=dict(),
- networks=dict(type='list', elements='str', default=['public', 'private']),
- state=dict(default='present', choices=['present', 'absent']),
- user_data=dict(no_log=True),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=300, type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- auto_increment = module.params.get('auto_increment')
- boot_from_volume = module.params.get('boot_from_volume')
- boot_volume = module.params.get('boot_volume')
- boot_volume_size = module.params.get('boot_volume_size')
- boot_volume_terminate = module.params.get('boot_volume_terminate')
- config_drive = module.params.get('config_drive')
- count = module.params.get('count')
- count_offset = module.params.get('count_offset')
- disk_config = module.params.get('disk_config')
- if disk_config:
- disk_config = disk_config.upper()
- exact_count = module.params.get('exact_count', False)
- extra_client_args = module.params.get('extra_client_args')
- extra_create_args = module.params.get('extra_create_args')
- files = module.params.get('files')
- flavor = module.params.get('flavor')
- group = module.params.get('group')
- image = module.params.get('image')
- instance_ids = module.params.get('instance_ids')
- key_name = module.params.get('key_name')
- meta = module.params.get('meta')
- name = module.params.get('name')
- networks = module.params.get('networks')
- state = module.params.get('state')
- user_data = module.params.get('user_data')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- setup_rax_module(module, pyrax)
-
- if extra_client_args:
- pyrax.cloudservers = pyrax.connect_to_cloudservers(
- region=pyrax.cloudservers.client.region_name,
- **extra_client_args)
- client = pyrax.cloudservers.client
- if 'bypass_url' in extra_client_args:
- client.management_url = extra_client_args['bypass_url']
-
- if pyrax.cloudservers is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- cloudservers(module, state=state, name=name, flavor=flavor,
- image=image, meta=meta, key_name=key_name, files=files,
- wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
- count=count, group=group, instance_ids=instance_ids,
- exact_count=exact_count, networks=networks,
- count_offset=count_offset, auto_increment=auto_increment,
- extra_create_args=extra_create_args, user_data=user_data,
- config_drive=config_drive, boot_from_volume=boot_from_volume,
- boot_volume=boot_volume, boot_volume_size=boot_volume_size,
- boot_volume_terminate=boot_volume_terminate)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
deleted file mode 100644
index abfda419..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cbs
-short_description: Manipulate Rackspace Cloud Block Storage Volumes
-description:
- - Manipulate Rackspace Cloud Block Storage Volumes
-options:
- description:
- type: str
- description:
- - Description to give the volume being created
- image:
- type: str
- description:
- - image to use for bootable volumes. Can be an C(id), C(human_id) or
- C(name). This option requires C(pyrax>=1.9.3)
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the volume
- name:
- type: str
- description:
- - Name to give the volume being created
- required: true
- size:
- type: int
- description:
- - Size of the volume to create in Gigabytes
- default: 100
- snapshot_id:
- type: str
- description:
- - The id of the snapshot to create the volume from
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- volume_type:
- type: str
- description:
- - Type of the volume being created
- choices:
- - SATA
- - SSD
- default: SATA
- wait:
- description:
- - wait for the volume to be in state 'available' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Block Storage Volume
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Storage volume create request
- local_action:
- module: rax_cbs
- credentials: ~/.raxpub
- name: my-volume
- description: My Volume
- volume_type: SSD
- size: 150
- region: DFW
- wait: yes
- state: present
- meta:
- app: my-cool-app
- register: my_volume
-'''
-
-from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
- rax_required_together, rax_to_dict, setup_rax_module)
-
-
-def cloud_block_storage(module, state, name, description, meta, size,
- snapshot_id, volume_type, wait, wait_timeout,
- image):
- changed = False
- volume = None
- instance = {}
-
- cbs = pyrax.cloud_blockstorage
-
- if cbs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if image:
- # pyrax<1.9.3 did not support specifying an image when creating
- # a volume, which is required for bootable volumes
- if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
- module.fail_json(msg='Creating a bootable volume requires '
- 'pyrax>=1.9.3')
- image = rax_find_image(module, pyrax, image)
-
- volume = rax_find_volume(module, pyrax, name)
-
- if state == 'present':
- if not volume:
- kwargs = dict()
- if image:
- kwargs['image'] = image
- try:
- volume = cbs.create(name, size=size, volume_type=volume_type,
- description=description,
- metadata=meta,
- snapshot_id=snapshot_id, **kwargs)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_for_build(volume, interval=5,
- attempts=attempts)
-
- volume.get()
- instance = rax_to_dict(volume)
-
- result = dict(changed=changed, volume=instance)
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
- elif wait and volume.status not in VOLUME_STATUS:
- result['msg'] = 'Timeout waiting on %s' % volume.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- if volume:
- instance = rax_to_dict(volume)
- try:
- volume.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, volume=instance)
-
-
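The bootable-image path above is gated on the pyrax version; LooseVersion compares dotted version strings component-wise, so multi-digit components sort numerically rather than lexically. A quick illustration using the same helper imported at the top of this module:

    from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

    assert LooseVersion('1.9.3') > LooseVersion('1.9.2')
    # '10' is compared as the number 10, not the string '10':
    assert LooseVersion('1.10.0') > LooseVersion('1.9.3')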
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- description=dict(type='str'),
- image=dict(type='str'),
- meta=dict(type='dict', default={}),
- name=dict(required=True),
- size=dict(type='int', default=100),
- snapshot_id=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- description = module.params.get('description')
- image = module.params.get('image')
- meta = module.params.get('meta')
- name = module.params.get('name')
- size = module.params.get('size')
- snapshot_id = module.params.get('snapshot_id')
- state = module.params.get('state')
- volume_type = module.params.get('volume_type')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_block_storage(module, state, name, description, meta, size,
- snapshot_id, volume_type, wait, wait_timeout,
- image)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
deleted file mode 100644
index fd210814..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cbs_attachments.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cbs_attachments
-short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
-description:
- - Manipulate Rackspace Cloud Block Storage Volume Attachments
-options:
- device:
- type: str
- description:
- - The device path to attach the volume to, e.g. /dev/xvde.
- - Before 2.4 this was a required field. Now it can be left as null to auto-assign the device name.
- volume:
- type: str
- description:
- - Name or id of the volume to attach/detach
- required: true
- server:
- type: str
- description:
- - Name or id of the server to attach/detach
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- wait:
- description:
- - wait for the volume to be in 'in-use'/'available' state before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Attach a Block Storage Volume
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Storage volume attach request
- local_action:
- module: rax_cbs_attachments
- credentials: ~/.raxpub
- volume: my-volume
- server: my-server
- device: /dev/xvdd
- region: DFW
- wait: yes
- state: present
- register: my_volume
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
- rax_argument_spec,
- rax_find_server,
- rax_find_volume,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_block_storage_attachments(module, state, volume, server, device,
- wait, wait_timeout):
- cbs = pyrax.cloud_blockstorage
- cs = pyrax.cloudservers
-
- if cbs is None or cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- changed = False
- instance = {}
-
- volume = rax_find_volume(module, pyrax, volume)
-
- if not volume:
- module.fail_json(msg='No matching storage volumes were found')
-
- if state == 'present':
- server = rax_find_server(module, pyrax, server)
-
- if (volume.attachments and
- volume.attachments[0]['server_id'] == server.id):
- changed = False
- elif volume.attachments:
- module.fail_json(msg='Volume is attached to another server')
- else:
- try:
- volume.attach_to_instance(server, mountpoint=device)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- volume.get()
-
- for key, value in vars(volume).items():
- if (isinstance(value, NON_CALLABLES) and
- not key.startswith('_')):
- instance[key] = value
-
- result = dict(changed=changed)
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
- elif wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_until(volume, 'status', 'in-use',
- interval=5, attempts=attempts)
-
- volume.get()
- result['volume'] = rax_to_dict(volume)
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- server = rax_find_server(module, pyrax, server)
-
- if (volume.attachments and
- volume.attachments[0]['server_id'] == server.id):
- try:
- volume.detach()
- if wait:
- pyrax.utils.wait_until(volume, 'status', 'available',
- interval=3, attempts=0,
- verbose=False)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- volume.get()
- changed = True
- elif volume.attachments:
- module.fail_json(msg='Volume is attached to another server')
-
- result = dict(changed=changed, volume=rax_to_dict(volume))
-
- if volume.status == 'error':
- result['msg'] = '%s failed to build' % volume.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- module.exit_json(changed=changed, volume=instance)
-
-
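The present-state branch above makes attachment idempotent by inspecting the volume's existing attachments before acting. Distilled into a standalone decision helper (the function name is hypothetical; pyrax-style .attachments metadata is assumed):

    def attachment_action(volume, server_id):
        # Already attached to this server: nothing to do.
        if volume.attachments and volume.attachments[0]['server_id'] == server_id:
            return 'noop'
        # Attached, but to some other server: refuse to steal it.
        if volume.attachments:
            return 'conflict'
        # Not attached anywhere: safe to attach.
        return 'attach'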
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- device=dict(required=False),
- volume=dict(required=True),
- server=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- device = module.params.get('device')
- volume = module.params.get('volume')
- server = module.params.get('server')
- state = module.params.get('state')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_block_storage_attachments(module, state, volume, server, device,
- wait, wait_timeout)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
deleted file mode 100644
index a9c32432..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cdb
-short_description: create/delete or resize a Rackspace Cloud Databases instance
-description:
- - creates / deletes or resizes a Rackspace Cloud Databases instance
- and optionally waits for it to be 'running'. The name option needs to be
- unique since it's used to identify the instance.
-options:
- name:
- type: str
- description:
- - Name of the databases server instance
- required: yes
- flavor:
- type: int
- description:
- - flavor to use for the instance, 1 to 6 (i.e. 512MB to 16GB)
- default: 1
- volume:
- type: int
- description:
- - Volume size of the database, 1-150 GB
- default: 2
- cdb_type:
- type: str
- description:
- - type of instance (i.e. MySQL, MariaDB, Percona)
- default: MySQL
- aliases: ['type']
- cdb_version:
- type: str
- description:
- - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
- - "The available choices are: C(5.1), C(5.6) and C(10)."
- default: '5.6'
- aliases: ['version']
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
- wait:
- description:
- - wait for the instance to be in state 'running' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Cloud Databases
- hosts: local
- gather_facts: False
- tasks:
- - name: Server build request
- local_action:
- module: rax_cdb
- credentials: ~/.raxpub
- region: IAD
- name: db-server1
- flavor: 1
- volume: 2
- cdb_type: MySQL
- cdb_version: 5.6
- wait: yes
- state: present
- register: rax_db_server
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_instance(name):
-
- cdb = pyrax.cloud_databases
- instances = cdb.list()
- if instances:
- for instance in instances:
- if instance.name == name:
- return instance
- return False
-
-
-def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout):
-
- for arg, value in dict(name=name, flavor=flavor,
- volume=volume, type=cdb_type, version=cdb_version
- ).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb"'
- ' module' % arg)
-
- if not 1 <= volume <= 150:
- module.fail_json(msg='volume is required to be between 1 and 150')
-
- cdb = pyrax.cloud_databases
-
- flavors = []
- for item in cdb.list_flavors():
- flavors.append(item.id)
-
- if flavor not in flavors:
- module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
-
- changed = False
-
- instance = find_instance(name)
-
- if not instance:
- action = 'create'
- try:
- instance = cdb.create(name=name, flavor=flavor, volume=volume,
- type=cdb_type, version=cdb_version)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- else:
- action = None
-
- if instance.volume.size != volume:
- action = 'resize'
- if instance.volume.size > volume:
- module.fail_json(changed=False, action=action,
- msg='The new volume size must be larger than '
- 'the current volume size',
- cdb=rax_to_dict(instance))
- instance.resize_volume(volume)
- changed = True
-
- if int(instance.flavor.id) != flavor:
- action = 'resize'
- pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
- attempts=wait_timeout)
- instance.resize(flavor)
- changed = True
-
- if wait:
- pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
- attempts=wait_timeout)
-
- if wait and instance.status != 'ACTIVE':
- module.fail_json(changed=changed, action=action,
- cdb=rax_to_dict(instance),
- msg='Timeout waiting for "%s" databases instance to '
- 'be created' % name)
-
- module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
-
-
-def delete_instance(module, name, wait, wait_timeout):
-
- if not name:
- module.fail_json(msg='name is required for the "rax_cdb" module')
-
- changed = False
-
- instance = find_instance(name)
- if not instance:
- module.exit_json(changed=False, action='delete')
-
- try:
- instance.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- if wait:
- pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
- attempts=wait_timeout)
-
- if wait and instance.status != 'SHUTDOWN':
- module.fail_json(changed=changed, action='delete',
- cdb=rax_to_dict(instance),
- msg='Timeout waiting for "%s" databases instance to '
- 'be deleted' % name)
-
- module.exit_json(changed=changed, action='delete',
- cdb=rax_to_dict(instance))
-
-
-def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout):
-
- # act on the state
- if state == 'present':
- save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
- wait_timeout)
- elif state == 'absent':
- delete_instance(module, name, wait, wait_timeout)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(type='str', required=True),
- flavor=dict(type='int', default=1),
- volume=dict(type='int', default=2),
- cdb_type=dict(type='str', default='MySQL', aliases=['type']),
- cdb_version=dict(type='str', default='5.6', aliases=['version']),
- state=dict(default='present', choices=['present', 'absent']),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- flavor = module.params.get('flavor')
- volume = module.params.get('volume')
- cdb_type = module.params.get('cdb_type')
- cdb_version = module.params.get('cdb_version')
- state = module.params.get('state')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
- rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
deleted file mode 100644
index 86cd1aac..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_database.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: rax_cdb_database
-short_description: 'create / delete a database in the Cloud Databases'
-description:
- - create / delete a database in the Cloud Databases.
-options:
- cdb_id:
- type: str
- description:
- - The databases server UUID
- required: yes
- name:
- type: str
- description:
- - Name to give to the database
- required: yes
- character_set:
- type: str
- description:
- - Set of symbols and encodings
- default: 'utf8'
- collate:
- type: str
- description:
- - Set of rules for comparing characters in a character set
- default: 'utf8_general_ci'
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a database in Cloud Databases
- hosts: local
- tasks:
- - name: Database build request
- local_action:
- module: rax_cdb_database
- credentials: ~/.raxpub
- region: IAD
- cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
- name: db1
- state: present
- register: rax_db_database
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_database(instance, name):
- try:
- database = instance.get_database(name)
- except Exception:
- return False
-
- return database
-
-
-def save_database(module, cdb_id, name, character_set, collate):
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- database = find_database(instance, name)
-
- if not database:
- try:
- database = instance.create_database(name=name,
- character_set=character_set,
- collate=collate)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='create',
- database=rax_to_dict(database))
-
-
-def delete_database(module, cdb_id, name):
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- database = find_database(instance, name)
-
- if database:
- try:
- database.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='delete',
- database=rax_to_dict(database))
-
-
-def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
-
- # act on the state
- if state == 'present':
- save_database(module, cdb_id, name, character_set, collate)
- elif state == 'absent':
- delete_database(module, cdb_id, name)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- cdb_id=dict(type='str', required=True),
- name=dict(type='str', required=True),
- character_set=dict(type='str', default='utf8'),
- collate=dict(type='str', default='utf8_general_ci'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- cdb_id = module.params.get('cdb_id')
- name = module.params.get('name')
- character_set = module.params.get('character_set')
- collate = module.params.get('collate')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
- rax_cdb_database(module, state, cdb_id, name, character_set, collate)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
deleted file mode 100644
index 674f17c0..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_cdb_user.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_cdb_user
-short_description: create / delete a Rackspace Cloud Database
-description:
- - create / delete a database in the Cloud Databases.
-options:
- cdb_id:
- type: str
- description:
- - The databases server UUID
- required: yes
- db_username:
- type: str
- description:
- - Name of the database user
- required: yes
- db_password:
- type: str
- description:
- - Database user password
- required: yes
- databases:
- type: list
- elements: str
- description:
- - Name of the databases that the user can access
- default: []
- host:
- type: str
- description:
- - Specifies the host from which a user is allowed to connect to
- the database. Possible values are a string containing an IPv4 address
- or "%" to allow connecting from any host
- default: '%'
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent']
- default: present
-author: "Simon JAILLET (@jails)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a user in Cloud Databases
- hosts: local
- tasks:
- - name: User build request
- local_action:
- module: rax_cdb_user
- credentials: ~/.raxpub
- region: IAD
- cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
- db_username: user1
- db_password: user1
- databases: ['db1']
- state: present
- register: rax_db_user
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_text
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
-
-
-def find_user(instance, name):
- try:
- user = instance.get_user(name)
- except Exception:
- return False
-
- return user
-
-
-def save_user(module, cdb_id, name, password, databases, host):
-
- for arg, value in dict(cdb_id=cdb_id, name=name).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb_user" '
- 'module' % arg)
-
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- user = find_user(instance, name)
-
- if not user:
- action = 'create'
- try:
- user = instance.create_user(name=name,
- password=password,
- database_names=databases,
- host=host)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
- else:
- action = 'update'
-
- if user.host != host:
- changed = True
-
- user.update(password=password, host=host)
-
- former_dbs = {item.name for item in user.list_user_access()}
- databases = set(databases)
-
- if databases != former_dbs:
- try:
- revoke_dbs = [db for db in former_dbs if db not in databases]
- user.revoke_user_access(db_names=revoke_dbs)
-
- new_dbs = [db for db in databases if db not in former_dbs]
- user.grant_user_access(db_names=new_dbs)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
-
-
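The grant/revoke step above is plain set reconciliation between the user's current databases and the requested list. The same diff in isolation (the database names are hypothetical):

    former_dbs = {'db1', 'db2'}   # what the user can access now
    desired = {'db2', 'db3'}      # what the task requested

    revoke_dbs = sorted(former_dbs - desired)   # ['db1'] -> revoke_user_access
    grant_dbs = sorted(desired - former_dbs)    # ['db3'] -> grant_user_access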
-def delete_user(module, cdb_id, name):
-
- for arg, value in dict(cdb_id=cdb_id, name=name).items():
- if not value:
- module.fail_json(msg='%s is required for the "rax_cdb_user"'
- ' module' % arg)
-
- cdb = pyrax.cloud_databases
-
- try:
- instance = cdb.get(cdb_id)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- changed = False
-
- user = find_user(instance, name)
-
- if user:
- try:
- user.delete()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- changed = True
-
- module.exit_json(changed=changed, action='delete')
-
-
-def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
-
- # act on the state
- if state == 'present':
- save_user(module, cdb_id, name, password, databases, host)
- elif state == 'absent':
- delete_user(module, cdb_id, name)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- cdb_id=dict(type='str', required=True),
- db_username=dict(type='str', required=True),
- db_password=dict(type='str', required=True, no_log=True),
- databases=dict(type='list', elements='str', default=[]),
- host=dict(type='str', default='%'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- cdb_id = module.params.get('cdb_id')
- name = module.params.get('db_username')
- password = module.params.get('db_password')
- databases = module.params.get('databases')
- host = to_text(module.params.get('host'), errors='surrogate_or_strict')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
- rax_cdb_user(module, state, cdb_id, name, password, databases, host)
-
-
-if __name__ == '__main__':
- main()
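
The grant/revoke reconciliation in save_user() above boils down to two set
differences between the current grants and the desired database list. A
minimal standalone sketch of that pattern (pure Python, no pyrax; the
function name is illustrative):

def reconcile_grants(current, desired):
    """Return (to_revoke, to_grant) that turn `current` into `desired`."""
    current, desired = set(current), set(desired)
    return sorted(current - desired), sorted(desired - current)

# Example: db1 is revoked, db3 is granted, db2 is left untouched.
assert reconcile_grants(['db1', 'db2'], ['db2', 'db3']) == (['db1'], ['db3'])
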
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
deleted file mode 100644
index 9160133e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_clb
-short_description: create / delete a load balancer in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud load balancer.
-options:
- algorithm:
- type: str
- description:
- - algorithm for the balancer being created
- choices:
- - RANDOM
- - LEAST_CONNECTIONS
- - ROUND_ROBIN
- - WEIGHTED_LEAST_CONNECTIONS
- - WEIGHTED_ROUND_ROBIN
- default: LEAST_CONNECTIONS
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the load balancer
- name:
- type: str
- description:
- - Name to give the load balancer
- required: yes
- port:
- type: int
- description:
- - Port for the balancer being created
- default: 80
- protocol:
- type: str
- description:
- - Protocol for the balancer being created
- choices:
- - DNS_TCP
- - DNS_UDP
- - FTP
- - HTTP
- - HTTPS
- - IMAPS
- - IMAPv4
- - LDAP
- - LDAPS
- - MYSQL
- - POP3
- - POP3S
- - SMTP
- - TCP
- - TCP_CLIENT_FIRST
- - UDP
- - UDP_STREAM
- - SFTP
- default: HTTP
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- timeout:
- type: int
- description:
- - timeout for communication between the balancer and the node
- default: 30
- type:
- type: str
- description:
- - type of interface for the balancer being created
- choices:
- - PUBLIC
- - SERVICENET
- default: PUBLIC
- vip_id:
- type: str
- description:
- - Virtual IP ID to use when creating the load balancer for purposes of
- sharing an IP with another load balancer of another protocol
- wait:
- description:
- - wait for the balancer to be in state 'running' before returning
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Load Balancer
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Load Balancer create request
- local_action:
- module: rax_clb
- credentials: ~/.raxpub
- name: my-lb
- port: 8080
- protocol: HTTP
- type: SERVICENET
- timeout: 30
- region: DFW
- wait: yes
- state: present
- meta:
- app: my-cool-app
- register: my_lb
-'''
-
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
- CLB_PROTOCOLS,
- rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
- vip_type, timeout, wait, wait_timeout, vip_id):
- if int(timeout) < 30:
- module.fail_json(msg='"timeout" must be greater than or equal to 30')
-
- changed = False
- balancers = []
-
- clb = pyrax.cloud_loadbalancers
- if not clb:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- balancer_list = clb.list()
- while balancer_list:
- retrieved = clb.list(marker=balancer_list.pop().id)
- balancer_list.extend(retrieved)
- if len(retrieved) < 2:
- break
-
- for balancer in balancer_list:
- if name != balancer.name and name != balancer.id:
- continue
-
- balancers.append(balancer)
-
- if len(balancers) > 1:
- module.fail_json(msg='Multiple Load Balancers were matched by name, '
- 'try using the Load Balancer ID instead')
-
- if state == 'present':
- if isinstance(meta, dict):
- metadata = [dict(key=k, value=v) for k, v in meta.items()]
-
- if not balancers:
- try:
- virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
- balancer = clb.create(name, metadata=metadata, port=port,
- algorithm=algorithm, protocol=protocol,
- timeout=timeout, virtual_ips=virtual_ips)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- else:
- balancer = balancers[0]
- setattr(balancer, 'metadata',
- [dict(key=k, value=v) for k, v in
- balancer.get_metadata().items()])
- atts = {
- 'name': name,
- 'algorithm': algorithm,
- 'port': port,
- 'protocol': protocol,
- 'timeout': timeout
- }
- for att, value in atts.items():
- current = getattr(balancer, att)
- if current != value:
- changed = True
-
- if changed:
- balancer.update(**atts)
-
- if balancer.metadata != metadata:
- balancer.set_metadata(meta)
- changed = True
-
- virtual_ips = [clb.VirtualIP(type=vip_type)]
- current_vip_types = set([v.type for v in balancer.virtual_ips])
- vip_types = set([v.type for v in virtual_ips])
- if current_vip_types != vip_types:
- module.fail_json(msg='Load balancer Virtual IP type cannot '
- 'be changed')
-
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- balancer.get()
- instance = rax_to_dict(balancer, 'clb')
-
- result = dict(changed=changed, balancer=instance)
-
- if balancer.status == 'ERROR':
- result['msg'] = '%s failed to build' % balancer.id
- elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
- result['msg'] = 'Timeout waiting on %s' % balancer.id
-
- if 'msg' in result:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
- elif state == 'absent':
- if balancers:
- balancer = balancers[0]
- try:
- balancer.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- instance = rax_to_dict(balancer, 'clb')
-
- if wait:
- attempts = wait_timeout // 5
- pyrax.utils.wait_until(balancer, 'status', 'DELETED',
- interval=5, attempts=attempts)
- else:
- instance = {}
-
- module.exit_json(changed=changed, balancer=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- algorithm=dict(choices=CLB_ALGORITHMS,
- default='LEAST_CONNECTIONS'),
- meta=dict(type='dict', default={}),
- name=dict(required=True),
- port=dict(type='int', default=80),
- protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
- state=dict(default='present', choices=['present', 'absent']),
- timeout=dict(type='int', default=30),
- type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
- vip_id=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- algorithm = module.params.get('algorithm')
- meta = module.params.get('meta')
- name = module.params.get('name')
- port = module.params.get('port')
- protocol = module.params.get('protocol')
- state = module.params.get('state')
- timeout = int(module.params.get('timeout'))
- vip_id = module.params.get('vip_id')
- vip_type = module.params.get('type')
- wait = module.params.get('wait')
- wait_timeout = int(module.params.get('wait_timeout'))
-
- setup_rax_module(module, pyrax)
-
- cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
- vip_type, timeout, wait, wait_timeout, vip_id)
-
-
-if __name__ == '__main__':
- main()
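
The listing loop in cloud_load_balancer() pages through results with a
marker rather than fetching everything at once; the pop() suggests the API
returns pages starting at (and including) the marker. A self-contained
sketch of that loop, with list_page() standing in for clb.list():

DATA = ['lb-%d' % i for i in range(1, 8)]  # pretend load balancer IDs

def list_page(marker=None, limit=3):
    """Return up to `limit` items starting at the marker, inclusive."""
    start = DATA.index(marker) if marker is not None else 0
    return DATA[start:start + limit]

def list_all():
    items = list_page()
    while items:
        # Pop the marker item first so extending with the marker-inclusive
        # page does not duplicate it.
        page = list_page(marker=items.pop())
        items.extend(page)
        if len(page) < 2:  # a short page means no further results
            break
    return items

assert list_all() == DATA
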
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
deleted file mode 100644
index 4adcc66f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_nodes.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_clb_nodes
-short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
-description:
- - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
-options:
- address:
- type: str
- required: false
- description:
- - IP address or domain name of the node
- condition:
- type: str
- required: false
- choices:
- - enabled
- - disabled
- - draining
- description:
- - Condition for the node, which determines its role within the load
- balancer
- load_balancer_id:
- type: int
- required: true
- description:
- - Load balancer id
- node_id:
- type: int
- required: false
- description:
- - Node id
- port:
- type: int
- required: false
- description:
- - Port number of the load balanced service on the node
- state:
- type: str
- required: false
- default: "present"
- choices:
- - present
- - absent
- description:
- - Indicate desired state of the node
- type:
- type: str
- required: false
- choices:
- - primary
- - secondary
- description:
- - Type of node
- wait:
- required: false
- default: "no"
- type: bool
- description:
- - Wait for the load balancer to become active before returning
- wait_timeout:
- type: int
- required: false
- default: 30
- description:
- - How long to wait before giving up and returning an error
- weight:
- type: int
- required: false
- description:
- - Weight of node
- virtualenv:
- type: path
- description:
- - Virtualenv to execute this module in
-author: "Lukasz Kawczynski (@neuroid)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Add a new node to the load balancer
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- address: 10.2.2.3
- port: 80
- condition: enabled
- type: primary
- wait: yes
- credentials: /path/to/credentials
-
-- name: Drain connections from a node
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- node_id: 410
- condition: draining
- wait: yes
- credentials: /path/to/credentials
-
-- name: Remove a node from the load balancer
- local_action:
- module: rax_clb_nodes
- load_balancer_id: 71
- node_id: 410
- state: absent
- wait: yes
- credentials: /path/to/credentials
-'''
-
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
-
-
-def _activate_virtualenv(path):
- activate_this = os.path.join(path, 'bin', 'activate_this.py')
- with open(activate_this) as f:
- code = compile(f.read(), activate_this, 'exec')
- # activate_this.py locates the virtualenv via __file__, so supply it
- # explicitly instead of inheriting this module's own __file__.
- exec(code, dict(__file__=activate_this))
-
-
-def _get_node(lb, node_id=None, address=None, port=None):
- """Return a matching node"""
- for node in getattr(lb, 'nodes', []):
- match_list = []
- if node_id is not None:
- match_list.append(getattr(node, 'id', None) == node_id)
- if address is not None:
- match_list.append(getattr(node, 'address', None) == address)
- if port is not None:
- match_list.append(getattr(node, 'port', None) == port)
-
- if match_list and all(match_list):
- return node
-
- return None
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- condition=dict(choices=['enabled', 'disabled', 'draining']),
- load_balancer_id=dict(required=True, type='int'),
- node_id=dict(type='int'),
- port=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- type=dict(choices=['primary', 'secondary']),
- virtualenv=dict(type='path'),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=30, type='int'),
- weight=dict(type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params['address']
- condition = (module.params['condition'] and
- module.params['condition'].upper())
- load_balancer_id = module.params['load_balancer_id']
- node_id = module.params['node_id']
- port = module.params['port']
- state = module.params['state']
- typ = module.params['type'] and module.params['type'].upper()
- virtualenv = module.params['virtualenv']
- wait = module.params['wait']
- wait_timeout = module.params['wait_timeout'] or 1
- weight = module.params['weight']
-
- if virtualenv:
- try:
- _activate_virtualenv(virtualenv)
- except IOError as e:
- module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
- virtualenv, e))
-
- setup_rax_module(module, pyrax)
-
- if not pyrax.cloud_loadbalancers:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- try:
- lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
-
- node = _get_node(lb, node_id, address, port)
-
- result = rax_clb_node_to_dict(node)
-
- if state == 'absent':
- if not node: # Removing a non-existent node
- module.exit_json(changed=False, state=state)
- try:
- lb.delete_node(node)
- result = {}
- except pyrax.exc.NotFound:
- module.exit_json(changed=False, state=state)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- else: # present
- if not node:
- if node_id: # Updating a non-existent node
- msg = 'Node %d not found' % node_id
- if lb.nodes:
- msg += (' (available nodes: %s)' %
- ', '.join([str(x.id) for x in lb.nodes]))
- module.fail_json(msg=msg)
- else: # Creating a new node
- try:
- node = pyrax.cloudloadbalancers.Node(
- address=address, port=port, condition=condition,
- weight=weight, type=typ)
- resp, body = lb.add_nodes([node])
- result.update(body['nodes'][0])
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- else: # Updating an existing node
- mutable = {
- 'condition': condition,
- 'type': typ,
- 'weight': weight,
- }
-
- # Iterate over a snapshot of the items: removing keys while iterating
- # the live items() view raises RuntimeError on Python 3.
- for name, value in list(mutable.items()):
- if value is None or value == getattr(node, name):
- mutable.pop(name)
-
- if not mutable:
- module.exit_json(changed=False, state=state, node=result)
-
- try:
- # The diff has to be set explicitly to update node's weight and
- # type; this should probably be fixed in pyrax
- lb.update_node(node, diff=mutable)
- result.update(mutable)
- except pyrax.exc.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
-
- if wait:
- pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
- attempts=wait_timeout)
- if lb.status != 'ACTIVE':
- module.fail_json(
- msg='Load balancer not active after %ds (current status: %s)' %
- (wait_timeout, lb.status.lower()))
-
- kwargs = {'node': result} if result else {}
- module.exit_json(changed=True, state=state, **kwargs)
-
-
-if __name__ == '__main__':
- main()
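
_get_node() above matches a node only when every criterion that was actually
supplied agrees, and ignores criteria left as None. The same logic isolated
as a runnable sketch (the Node class is a stand-in for pyrax's node objects):

class Node(object):
    def __init__(self, id, address, port):
        self.id, self.address, self.port = id, address, port

def find_node(nodes, node_id=None, address=None, port=None):
    wanted = {'id': node_id, 'address': address, 'port': port}
    for node in nodes:
        checks = [getattr(node, attr, None) == value
                  for attr, value in wanted.items() if value is not None]
        if checks and all(checks):  # at least one criterion, all satisfied
            return node
    return None

nodes = [Node(410, '10.2.2.3', 80), Node(411, '10.2.2.4', 80)]
assert find_node(nodes, address='10.2.2.4').id == 411
assert find_node(nodes) is None  # no criteria supplied, nothing matches
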
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
deleted file mode 100644
index adf37512..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_clb_ssl.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: rax_clb_ssl
-short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
-description:
-- Set up, reconfigure, or remove SSL termination for an existing load balancer.
-options:
- loadbalancer:
- type: str
- description:
- - Name or ID of the load balancer on which to manage SSL termination.
- required: true
- state:
- type: str
- description:
- - If set to "present", SSL termination will be added to this load balancer.
- - If "absent", SSL termination will be removed instead.
- choices:
- - present
- - absent
- default: present
- enabled:
- description:
- - If set to "false", temporarily disable SSL termination without discarding
- - existing credentials.
- default: true
- type: bool
- private_key:
- type: str
- description:
- - The private SSL key as a string in PEM format.
- certificate:
- type: str
- description:
- - The public SSL certificate as a string in PEM format.
- intermediate_certificate:
- type: str
- description:
- - One or more intermediate certificate authorities as a string in PEM
- format, concatenated into a single string.
- secure_port:
- type: int
- description:
- - The port on which to listen for secure traffic.
- default: 443
- secure_traffic_only:
- description:
- - If "true", the load balancer will *only* accept secure traffic.
- default: false
- type: bool
- https_redirect:
- description:
- - If "true", the load balancer will redirect HTTP traffic to HTTPS.
- - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
- - termination is also applied or removed.
- type: bool
- wait:
- description:
- - Wait for the balancer to be in state "running" before turning.
- default: false
- type: bool
- wait_timeout:
- type: int
- description:
- - How long before "wait" gives up, in seconds.
- default: 300
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Enable SSL termination on a load balancer
- community.general.rax_clb_ssl:
- loadbalancer: the_loadbalancer
- state: present
- private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
- certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
- intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
- secure_traffic_only: true
- wait: true
-
-- name: Disable SSL termination
- community.general.rax_clb_ssl:
- loadbalancer: "{{ registered_lb.balancer.id }}"
- state: absent
- wait: true
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_find_loadbalancer,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
- certificate, intermediate_certificate, secure_port,
- secure_traffic_only, https_redirect,
- wait, wait_timeout):
- # Validate arguments.
-
- if state == 'present':
- if not private_key:
- module.fail_json(msg="private_key must be provided.")
- else:
- private_key = private_key.strip()
-
- if not certificate:
- module.fail_json(msg="certificate must be provided.")
- else:
- certificate = certificate.strip()
-
- attempts = wait_timeout // 5
-
- # Locate the load balancer.
-
- balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
- existing_ssl = balancer.get_ssl_termination()
-
- changed = False
-
- if state == 'present':
- # Apply or reconfigure SSL termination on the load balancer.
- ssl_attrs = dict(
- securePort=secure_port,
- privatekey=private_key,
- certificate=certificate,
- intermediateCertificate=intermediate_certificate,
- enabled=enabled,
- secureTrafficOnly=secure_traffic_only
- )
-
- needs_change = False
-
- if existing_ssl:
- for ssl_attr, value in ssl_attrs.items():
- if ssl_attr == 'privatekey':
- # The private key is not included in get_ssl_termination's
- # output (as it shouldn't be). Also, if you're changing the
- # private key, you'll also be changing the certificate,
- # so we don't lose anything by not checking it.
- continue
-
- if value is not None and existing_ssl.get(ssl_attr) != value:
- needs_change = True
- else:
- needs_change = True
-
- if needs_change:
- try:
- balancer.add_ssl_termination(**ssl_attrs)
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
- elif state == 'absent':
- # Remove SSL termination if it's already configured.
- if existing_ssl:
- try:
- balancer.delete_ssl_termination()
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
-
- if https_redirect is not None and balancer.httpsRedirect != https_redirect:
- if changed:
- # This wait is unavoidable because load balancers are immutable
- # while the SSL termination changes above are being applied.
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- try:
- balancer.update(httpsRedirect=https_redirect)
- except pyrax.exceptions.PyraxException as e:
- module.fail_json(msg='%s' % e.message)
- changed = True
-
- if changed and wait:
- pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
-
- balancer.get()
- new_ssl_termination = balancer.get_ssl_termination()
-
- # Intentionally omit the private key from the module output, so you don't
- # accidentally echo it with `ansible-playbook -v` or `debug`, and the
- # certificate, which is just long. Convert other attributes to snake_case
- # and include https_redirect at the top-level.
- if new_ssl_termination:
- new_ssl = dict(
- enabled=new_ssl_termination['enabled'],
- secure_port=new_ssl_termination['securePort'],
- secure_traffic_only=new_ssl_termination['secureTrafficOnly']
- )
- else:
- new_ssl = None
-
- result = dict(
- changed=changed,
- https_redirect=balancer.httpsRedirect,
- ssl_termination=new_ssl,
- balancer=rax_to_dict(balancer, 'clb')
- )
- success = True
-
- if balancer.status == 'ERROR':
- result['msg'] = '%s failed to build' % balancer.id
- success = False
- elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
- result['msg'] = 'Timeout waiting on %s' % balancer.id
- success = False
-
- if success:
- module.exit_json(**result)
- else:
- module.fail_json(**result)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(dict(
- loadbalancer=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- enabled=dict(type='bool', default=True),
- private_key=dict(no_log=True),
- certificate=dict(),
- intermediate_certificate=dict(),
- secure_port=dict(type='int', default=443),
- secure_traffic_only=dict(type='bool', default=False),
- https_redirect=dict(type='bool'),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300)
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module.')
-
- loadbalancer = module.params.get('loadbalancer')
- state = module.params.get('state')
- enabled = module.boolean(module.params.get('enabled'))
- private_key = module.params.get('private_key')
- certificate = module.params.get('certificate')
- intermediate_certificate = module.params.get('intermediate_certificate')
- secure_port = module.params.get('secure_port')
- secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
- https_redirect = module.boolean(module.params.get('https_redirect'))
- wait = module.boolean(module.params.get('wait'))
- wait_timeout = module.params.get('wait_timeout')
-
- setup_rax_module(module, pyrax)
-
- cloud_load_balancer_ssl(
- module, loadbalancer, state, enabled, private_key, certificate,
- intermediate_certificate, secure_port, secure_traffic_only,
- https_redirect, wait, wait_timeout
- )
-
-
-if __name__ == '__main__':
- main()
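
The idempotence check in cloud_load_balancer_ssl() compares each desired
attribute against what get_ssl_termination() reports, deliberately skipping
the private key because the API never echoes it back. A condensed sketch of
that comparison (names are illustrative):

def needs_change(desired, existing, write_only=('privatekey',)):
    if not existing:
        return True  # no SSL termination configured yet
    return any(value is not None and existing.get(attr) != value
               for attr, value in desired.items() if attr not in write_only)

existing = {'securePort': 443, 'enabled': True, 'secureTrafficOnly': False}
desired = {'securePort': 443, 'enabled': True, 'secureTrafficOnly': False,
           'privatekey': '-----BEGIN PRIVATE KEY-----...'}
assert not needs_change(desired, existing)   # in sync, nothing to do
desired['securePort'] = 8443
assert needs_change(desired, existing)       # port differs, update needed
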
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
deleted file mode 100644
index 915e13a9..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_dns
-short_description: Manage domains on Rackspace Cloud DNS
-description:
- - Manage domains on Rackspace Cloud DNS
-options:
- comment:
- type: str
- description:
- - Brief description of the domain. Maximum length of 160 characters
- email:
- type: str
- description:
- - Email address of the domain administrator
- name:
- type: str
- description:
- - Domain name to create
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- ttl:
- type: int
- description:
- - Time to live of domain in seconds
- default: 3600
-notes:
- - "It is recommended that plays utilizing this module be run with
- C(serial: 1) to avoid exceeding the API request limit imposed by
- the Rackspace CloudDNS API"
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Create domain
- hosts: all
- gather_facts: False
- tasks:
- - name: Domain create request
- local_action:
- module: rax_dns
- credentials: ~/.raxpub
- name: example.org
- email: admin@example.org
- register: rax_dns
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_dns(module, comment, email, name, state, ttl):
- changed = False
-
- dns = pyrax.cloud_dns
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not email:
- module.fail_json(msg='An "email" attribute is required for '
- 'creating a domain')
-
- try:
- domain = dns.find(name=name)
- except pyrax.exceptions.NoUniqueMatch as e:
- module.fail_json(msg='%s' % e.message)
- except pyrax.exceptions.NotFound:
- try:
- domain = dns.create(name=name, emailAddress=email, ttl=ttl,
- comment=comment)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- update = {}
- if comment != getattr(domain, 'comment', None):
- update['comment'] = comment
- if ttl != getattr(domain, 'ttl', None):
- update['ttl'] = ttl
- if email != getattr(domain, 'emailAddress', None):
- update['emailAddress'] = email
-
- if update:
- try:
- domain.update(**update)
- changed = True
- domain.get()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- domain = dns.find(name=name)
- except pyrax.exceptions.NotFound:
- domain = {}
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- if domain:
- try:
- domain.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, domain=rax_to_dict(domain))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- comment=dict(),
- email=dict(),
- name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- ttl=dict(type='int', default=3600),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- comment = module.params.get('comment')
- email = module.params.get('email')
- name = module.params.get('name')
- state = module.params.get('state')
- ttl = module.params.get('ttl')
-
- setup_rax_module(module, pyrax, False)
-
- rax_dns(module, comment, email, name, state, ttl)
-
-
-if __name__ == '__main__':
- main()
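
rax_dns() applies changes by collecting only the attributes that differ into
an update dict and issuing a single update() when that dict is non-empty,
which is what keeps repeated runs idempotent. The pattern in isolation, with
a dummy object standing in for the pyrax domain:

class FakeDomain(object):
    comment, ttl, emailAddress = 'old comment', 3600, 'admin@example.org'

def pending_update(domain, comment, email, ttl):
    update = {}
    if comment != getattr(domain, 'comment', None):
        update['comment'] = comment
    if ttl != getattr(domain, 'ttl', None):
        update['ttl'] = ttl
    if email != getattr(domain, 'emailAddress', None):
        update['emailAddress'] = email
    return update  # empty dict means no API call and changed stays False

assert pending_update(FakeDomain(), 'new comment', 'admin@example.org',
                      3600) == {'comment': 'new comment'}
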
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
deleted file mode 100644
index 1a6986de..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_dns_record.py
+++ /dev/null
@@ -1,353 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_dns_record
-short_description: Manage DNS records on Rackspace Cloud DNS
-description:
- - Manage DNS records on Rackspace Cloud DNS
-options:
- comment:
- type: str
- description:
- - Brief description of the record. Maximum length of 160 characters
- data:
- type: str
- description:
- - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
- SRV/TXT
- required: True
- domain:
- type: str
- description:
- - Domain name to create the record in. This is an invalid option when
- type=PTR
- loadbalancer:
- type: str
- description:
- - Load Balancer ID to create a PTR record for. Only used with type=PTR
- name:
- type: str
- description:
- - FQDN record name to create
- required: True
- overwrite:
- description:
- - Add new records if data doesn't match, instead of updating existing
- record with matching name. If there are already multiple records with
- matching name and overwrite=true, this module will fail.
- default: true
- type: bool
- priority:
- type: int
- description:
- - Required for MX and SRV records, but forbidden for other record types.
- If specified, must be an integer from 0 to 65535.
- server:
- type: str
- description:
- - Server ID to create a PTR record for. Only used with type=PTR
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- ttl:
- type: int
- description:
- - Time to live of record in seconds
- default: 3600
- type:
- type: str
- description:
- - DNS record type
- choices:
- - A
- - AAAA
- - CNAME
- - MX
- - NS
- - SRV
- - TXT
- - PTR
- required: true
-notes:
- - "It is recommended that plays utilizing this module be run with
- C(serial: 1) to avoid exceeding the API request limit imposed by
- the Rackspace CloudDNS API"
- - To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
- supplied
- - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- - C(PTR) record support was added in version 1.7
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Create DNS Records
- hosts: all
- gather_facts: False
- tasks:
- - name: Create A record
- local_action:
- module: rax_dns_record
- credentials: ~/.raxpub
- domain: example.org
- name: www.example.org
- data: "{{ rax_accessipv4 }}"
- type: A
- register: a_record
-
- - name: Create PTR record
- local_action:
- module: rax_dns_record
- credentials: ~/.raxpub
- server: "{{ rax_id }}"
- name: "{{ inventory_hostname }}"
- data: "{{ rax_accessipv4 }}"
- type: PTR
- region: DFW
- register: ptr_record
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_find_loadbalancer,
- rax_find_server,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
- name=None, server=None, state='present', ttl=7200):
- changed = False
- results = []
-
- dns = pyrax.cloud_dns
-
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if loadbalancer:
- item = rax_find_loadbalancer(module, pyrax, loadbalancer)
- elif server:
- item = rax_find_server(module, pyrax, server)
-
- if state == 'present':
- current = dns.list_ptr_records(item)
- for record in current:
- if record.data == data:
- if record.ttl != ttl or record.name != name:
- try:
- dns.update_ptr_record(item, record, name, data, ttl)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- record.ttl = ttl
- record.name = name
- results.append(rax_to_dict(record))
- break
- else:
- results.append(rax_to_dict(record))
- break
-
- if not results:
- record = dict(name=name, type='PTR', data=data, ttl=ttl,
- comment=comment)
- try:
- results = dns.add_ptr_records(item, [record])
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, records=results)
-
- elif state == 'absent':
- current = dns.list_ptr_records(item)
- for record in current:
- if record.data == data:
- results.append(rax_to_dict(record))
- break
-
- if results:
- try:
- dns.delete_ptr_records(item, data)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, records=results)
-
-
-def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
- overwrite=True, priority=None, record_type='A',
- state='present', ttl=7200):
- """Function for manipulating record types other than PTR"""
-
- changed = False
-
- dns = pyrax.cloud_dns
- if not dns:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not priority and record_type in ['MX', 'SRV']:
- module.fail_json(msg='A "priority" attribute is required for '
- 'creating a MX or SRV record')
-
- try:
- domain = dns.find(name=domain)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- try:
- if overwrite:
- record = domain.find_record(record_type, name=name)
- else:
- record = domain.find_record(record_type, name=name, data=data)
- except pyrax.exceptions.DomainRecordNotUnique as e:
- module.fail_json(msg='overwrite=true and there are multiple matching records')
- except pyrax.exceptions.DomainRecordNotFound as e:
- try:
- record_data = {
- 'type': record_type,
- 'name': name,
- 'data': data,
- 'ttl': ttl
- }
- if comment:
- record_data.update(dict(comment=comment))
- if priority and record_type.upper() in ['MX', 'SRV']:
- record_data.update(dict(priority=priority))
-
- record = domain.add_records([record_data])[0]
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- update = {}
- if comment != getattr(record, 'comment', None):
- update['comment'] = comment
- if ttl != getattr(record, 'ttl', None):
- update['ttl'] = ttl
- if priority != getattr(record, 'priority', None):
- update['priority'] = priority
- if data != getattr(record, 'data', None):
- update['data'] = data
-
- if update:
- try:
- record.update(**update)
- changed = True
- record.get()
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- domain = dns.find(name=domain)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- try:
- record = domain.find_record(record_type, name=name, data=data)
- except pyrax.exceptions.DomainRecordNotFound as e:
- record = {}
- except pyrax.exceptions.DomainRecordNotUnique as e:
- module.fail_json(msg='%s' % e.message)
-
- if record:
- try:
- record.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, record=rax_to_dict(record))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- comment=dict(),
- data=dict(required=True),
- domain=dict(),
- loadbalancer=dict(),
- name=dict(required=True),
- overwrite=dict(type='bool', default=True),
- priority=dict(type='int'),
- server=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- ttl=dict(type='int', default=3600),
- type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
- 'SRV', 'TXT', 'PTR'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[
- ['server', 'loadbalancer', 'domain'],
- ],
- required_one_of=[
- ['server', 'loadbalancer', 'domain'],
- ],
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- comment = module.params.get('comment')
- data = module.params.get('data')
- domain = module.params.get('domain')
- loadbalancer = module.params.get('loadbalancer')
- name = module.params.get('name')
- overwrite = module.params.get('overwrite')
- priority = module.params.get('priority')
- server = module.params.get('server')
- state = module.params.get('state')
- ttl = module.params.get('ttl')
- record_type = module.params.get('type')
-
- setup_rax_module(module, pyrax, False)
-
- if record_type.upper() == 'PTR':
- if not server and not loadbalancer:
- module.fail_json(msg='one of the following is required: '
- 'server,loadbalancer')
- rax_dns_record_ptr(module, data=data, comment=comment,
- loadbalancer=loadbalancer, name=name, server=server,
- state=state, ttl=ttl)
- else:
- rax_dns_record(module, comment=comment, data=data, domain=domain,
- name=name, overwrite=overwrite, priority=priority,
- record_type=record_type, state=state, ttl=ttl)
-
-
-if __name__ == '__main__':
- main()
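
The overwrite flag in rax_dns_record() only changes how an existing record is
looked up: with overwrite=true the record is matched by name alone and then
updated in place, while with overwrite=false it is matched by name and data,
so differing data falls through to add_records() and creates a sibling
record. A small sketch of that lookup difference:

RECORDS = [{'type': 'A', 'name': 'www.example.org', 'data': '1.2.3.4'}]

def find_record(rtype, name, data=None):
    matches = [r for r in RECORDS if r['type'] == rtype and r['name'] == name
               and (data is None or r['data'] == data)]
    if len(matches) > 1:
        raise ValueError('record is not unique')
    return matches[0] if matches else None

# overwrite=true: matched by name, the existing record would be updated.
assert find_record('A', 'www.example.org')['data'] == '1.2.3.4'
# overwrite=false: no record carries this data, so a new one would be added.
assert find_record('A', 'www.example.org', data='5.6.7.8') is None
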
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
deleted file mode 100644
index 0288a5e3..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_facts.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_facts
-short_description: Gather facts for Rackspace Cloud Servers
-description:
- - Gather facts for Rackspace Cloud Servers.
-options:
- address:
- type: str
- description:
- - Server IP address to retrieve facts for, will match any IP assigned to
- the server
- id:
- type: str
- description:
- - Server ID to retrieve facts for
- name:
- type: str
- description:
- - Server name to retrieve facts for
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Gather info about servers
- hosts: all
- gather_facts: False
- tasks:
- - name: Get facts about servers
- local_action:
- module: rax_facts
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
- - name: Map some facts
- ansible.builtin.set_fact:
- ansible_ssh_host: "{{ rax_accessipv4 }}"
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_facts(module, address, name, server_id):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- ansible_facts = {}
-
- search_opts = {}
- if name:
- search_opts = dict(name='^%s$' % name)
- try:
- servers = cs.servers.list(search_opts=search_opts)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif address:
- servers = []
- try:
- for server in cs.servers.list():
- for addresses in server.networks.values():
- if address in addresses:
- servers.append(server)
- break
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif server_id:
- servers = []
- try:
- servers.append(cs.servers.get(server_id))
- except Exception:
- # no server with that ID; fall through with an empty list
- pass
-
- servers[:] = [server for server in servers if server.status != "DELETED"]
-
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers found matching provided '
- 'search parameters')
- elif len(servers) == 1:
- ansible_facts = rax_to_dict(servers[0], 'server')
-
- module.exit_json(changed=changed, ansible_facts=ansible_facts)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- id=dict(),
- name=dict(),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[['address', 'id', 'name']],
- required_one_of=[['address', 'id', 'name']],
- supports_check_mode=True,
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params.get('address')
- server_id = module.params.get('id')
- name = module.params.get('name')
-
- setup_rax_module(module, pyrax)
-
- rax_facts(module, address, name, server_id)
-
-
-if __name__ == '__main__':
- main()
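
When searching by name, rax_facts() anchors the pattern as '^name$' because
the compute API treats the name filter as a pattern rather than an exact
string; without the anchors "web" would also match "web-2". Servers in
DELETED state are then filtered out. An approximation of that matching
(re.escape is added here for safety; the module interpolates the raw name):

import re

SERVERS = [('web', 'ACTIVE'), ('web-2', 'ACTIVE'), ('web', 'DELETED')]

def find_servers(name):
    pattern = re.compile('^%s$' % re.escape(name))
    return [(srv, status) for srv, status in SERVERS
            if pattern.match(srv) and status != 'DELETED']

assert find_servers('web') == [('web', 'ACTIVE')]  # 'web-2' is excluded
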
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
deleted file mode 100644
index 1e1f82c8..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files.py
+++ /dev/null
@@ -1,393 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Paul Durivage
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_files
-short_description: Manipulate Rackspace Cloud Files Containers
-description:
- - Manipulate Rackspace Cloud Files Containers
-options:
- clear_meta:
- description:
- - Optionally clear existing metadata when applying metadata to existing containers.
- Selecting this option is only appropriate when setting type=meta
- type: bool
- default: "no"
- container:
- type: str
- description:
- - The container to use for container or metadata operations.
- meta:
- type: dict
- description:
- - A hash of items to set as metadata values on a container
- private:
- description:
- - Used to set a container as private, removing it from the CDN. B(Warning!)
- Private containers, if previously made public, can have live objects
- available until the TTL on cached objects expires
- type: bool
- default: false
- public:
- description:
- - Used to set a container as public, available via the Cloud Files CDN
- type: bool
- default: false
- region:
- type: str
- description:
- - Region in which to create or manage the container
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present', 'absent', 'list']
- default: present
- ttl:
- type: int
- description:
- - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
- Setting a TTL is only appropriate for containers that are public
- type:
- type: str
- description:
- - Type of object to do work on, i.e. metadata object or a container object
- choices:
- - container
- - meta
- default: container
- web_error:
- type: str
- description:
- - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
- web_index:
- type: str
- description:
- - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
-author: "Paul Durivage (@angstwad)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: "Test Cloud Files Containers"
- hosts: local
- gather_facts: no
- tasks:
- - name: "List all containers"
- community.general.rax_files:
- state: list
-
- - name: "Create container called 'mycontainer'"
- community.general.rax_files:
- container: mycontainer
-
- - name: "Create container 'mycontainer2' with metadata"
- community.general.rax_files:
- container: mycontainer2
- meta:
- key: value
- file_for: someuser@example.com
-
- - name: "Set a container's web index page"
- community.general.rax_files:
- container: mycontainer
- web_index: index.html
-
- - name: "Set a container's web error page"
- community.general.rax_files:
- container: mycontainer
- web_error: error.html
-
- - name: "Make container public"
- community.general.rax_files:
- container: mycontainer
- public: yes
-
- - name: "Make container public with a 24 hour TTL"
- community.general.rax_files:
- container: mycontainer
- public: yes
- ttl: 86400
-
- - name: "Make container private"
- community.general.rax_files:
- container: mycontainer
- private: yes
-
-- name: "Test Cloud Files Containers Metadata Storage"
- hosts: local
- gather_facts: no
- tasks:
- - name: "Get mycontainer2 metadata"
- community.general.rax_files:
- container: mycontainer2
- type: meta
-
- - name: "Set mycontainer2 metadata"
- community.general.rax_files:
- container: mycontainer2
- type: meta
- meta:
- uploaded_by: someuser@example.com
-
- - name: "Remove mycontainer2 metadata"
- community.general.rax_files:
- container: "mycontainer2"
- type: meta
- state: absent
- meta:
- key: ""
- file_for: ""
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError as e:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-EXIT_DICT = dict(success=True)
-META_PREFIX = 'x-container-meta-'
-
-
-def _get_container(module, cf, container):
- try:
- return cf.get_container(container)
- except pyrax.exc.NoSuchContainer as e:
- module.fail_json(msg=e.message)
-
-
-def _fetch_meta(module, container):
- EXIT_DICT['meta'] = dict()
- try:
- for k, v in container.get_metadata().items():
- split_key = k.split(META_PREFIX)[-1]
- EXIT_DICT['meta'][split_key] = v
- except Exception as e:
- module.fail_json(msg=e.message)
-
-
-def meta(cf, module, container_, state, meta_, clear_meta):
- c = _get_container(module, cf, container_)
-
- if meta_ and state == 'present':
- try:
- meta_set = c.set_metadata(meta_, clear=clear_meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- elif meta_ and state == 'absent':
- remove_results = []
- for k, v in meta_.items():
- c.remove_metadata_key(k)
- remove_results.append(k)
- EXIT_DICT['deleted_meta_keys'] = remove_results
- elif state == 'absent':
- remove_results = []
- for k, v in c.get_metadata().items():
- c.remove_metadata_key(k)
- remove_results.append(k)
- EXIT_DICT['deleted_meta_keys'] = remove_results
-
- _fetch_meta(module, c)
- _locals = locals().keys()
-
- EXIT_DICT['container'] = c.name
- if 'meta_set' in _locals or 'remove_results' in _locals:
- EXIT_DICT['changed'] = True
-
- module.exit_json(**EXIT_DICT)
-
-
-def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
- private, web_index, web_error):
- if public and private:
- module.fail_json(msg='container cannot be simultaneously '
- 'set to public and private')
-
- if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
- module.fail_json(msg='container attributes cannot be set or '
- 'removed when state is absent')
-
- if state == 'list':
- # We don't care if attributes are specified, let's list containers
- EXIT_DICT['containers'] = cf.list_containers()
- module.exit_json(**EXIT_DICT)
-
- try:
- c = cf.get_container(container_)
- except pyrax.exc.NoSuchContainer as e:
- # Make the container if state=present, otherwise bomb out
- if state == 'present':
- try:
- c = cf.create_container(container_)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['changed'] = True
- EXIT_DICT['created'] = True
- else:
- module.fail_json(msg=e.message)
- else:
- # Successfully grabbed a container object
- # Delete if state is absent
- if state == 'absent':
- try:
- cont_deleted = c.delete()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['deleted'] = True
-
- if meta_:
- try:
- meta_set = c.set_metadata(meta_, clear=clear_meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- finally:
- _fetch_meta(module, c)
-
- if ttl:
- try:
- c.cdn_ttl = ttl
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['ttl'] = c.cdn_ttl
-
- if public:
- try:
- cont_public = c.make_public()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
- ssl_url=c.cdn_ssl_uri,
- streaming_url=c.cdn_streaming_uri,
- ios_uri=c.cdn_ios_uri)
-
- if private:
- try:
- cont_private = c.make_private()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_private'] = True
-
- if web_index:
- try:
- cont_web_index = c.set_web_index_page(web_index)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_index'] = True
- finally:
- _fetch_meta(module, c)
-
- if web_error:
- try:
- cont_err_index = c.set_web_error_page(web_error)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- EXIT_DICT['set_error'] = True
- finally:
- _fetch_meta(module, c)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['objs_in_container'] = c.object_count
- EXIT_DICT['total_bytes'] = c.total_bytes
-
- _locals = locals().keys()
- if ('cont_deleted' in _locals
- or 'meta_set' in _locals
- or 'cont_public' in _locals
- or 'cont_private' in _locals
- or 'cont_web_index' in _locals
- or 'cont_err_index' in _locals):
- EXIT_DICT['changed'] = True
-
- module.exit_json(**EXIT_DICT)
-
-
-def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
- private, web_index, web_error):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
-
- if cf is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if typ == "container":
- container(cf, module, container_, state, meta_, clear_meta, ttl,
- public, private, web_index, web_error)
- else:
- meta(cf, module, container_, state, meta_, clear_meta)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- container=dict(),
- state=dict(choices=['present', 'absent', 'list'],
- default='present'),
- meta=dict(type='dict', default=dict()),
- clear_meta=dict(default=False, type='bool'),
- type=dict(choices=['container', 'meta'], default='container'),
- ttl=dict(type='int'),
- public=dict(default=False, type='bool'),
- private=dict(default=False, type='bool'),
- web_index=dict(),
- web_error=dict()
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- container_ = module.params.get('container')
- state = module.params.get('state')
- meta_ = module.params.get('meta')
- clear_meta = module.params.get('clear_meta')
- typ = module.params.get('type')
- ttl = module.params.get('ttl')
- public = module.params.get('public')
- private = module.params.get('private')
- web_index = module.params.get('web_index')
- web_error = module.params.get('web_error')
-
- if state in ['present', 'absent'] and not container_:
- module.fail_json(msg='please specify a container name')
- if clear_meta and typ != 'meta':
- module.fail_json(msg='clear_meta can only be used when setting '
- 'metadata')
-
- setup_rax_module(module, pyrax)
- cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
- private, web_index, web_error)
-
-
-if __name__ == '__main__':
- main()
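
container() above detects whether anything changed by probing locals() for
names such as cont_deleted or meta_set, i.e. by checking which branches
happened to bind a variable. An explicit flag expresses the same behaviour
more directly; a minimal sketch of that alternative:

def set_container_visibility(make_public=False, make_private=False):
    changed = False
    if make_public:
        # ... c.make_public() would be called here ...
        changed = True
    if make_private:
        # ... c.make_private() would be called here ...
        changed = True
    return changed

assert set_container_visibility(make_public=True) is True
assert set_container_visibility() is False
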
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
deleted file mode 100644
index 3269fe05..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_files_objects.py
+++ /dev/null
@@ -1,609 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2013, Paul Durivage
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_files_objects
-short_description: Upload, download, and delete objects in Rackspace Cloud Files
-description:
- - Upload, download, and delete objects in Rackspace Cloud Files
-options:
- clear_meta:
- description:
- - Optionally clear existing metadata when applying metadata to existing objects.
- Selecting this option is only appropriate when setting type=meta
- type: bool
- default: 'no'
- container:
- type: str
- description:
- - The container to use for file object operations.
- required: true
- dest:
- type: str
- description:
- - The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
- Used to specify the destination of an operation on a remote object; i.e. a file name,
- "file1", or a comma-separated list of remote objects, "file1,file2,file17"
- expires:
- type: int
- description:
- - Used to set an expiration on a file or folder uploaded to Cloud Files.
- Requires an integer, specifying expiration in seconds
- meta:
- type: dict
- description:
- - A hash of items to set as metadata values on an uploaded file or folder
- method:
- type: str
- description:
- - The method of operation to be performed. For example, put to upload files
- to Cloud Files, get to download files from Cloud Files, or delete to delete
- remote objects in Cloud Files
- choices:
- - get
- - put
- - delete
- default: get
- src:
- type: str
- description:
- - Source from which to upload files. Used to specify a remote object as a source for
- an operation, i.e. a file name, "file1", or a comma-separated list of remote objects,
- "file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
- structure:
- description:
- - Used to specify whether to maintain nested directory structure when downloading objects
- from Cloud Files. Setting to false downloads the contents of a container to a single,
- flat directory
- type: bool
- default: 'yes'
- type:
- type: str
- description:
- - Type of object to do work on
- - Metadata object or a file object
- choices:
- - file
- - meta
- default: file
-author: "Paul Durivage (@angstwad)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: "Test Cloud Files Objects"
- hosts: local
- gather_facts: False
- tasks:
- - name: "Get objects from test container"
- community.general.rax_files_objects:
- container: testcont
- dest: ~/Downloads/testcont
-
- - name: "Get single object from test container"
- community.general.rax_files_objects:
- container: testcont
- src: file1
- dest: ~/Downloads/testcont
-
- - name: "Get several objects from test container"
- community.general.rax_files_objects:
- container: testcont
- src: file1,file2,file3
- dest: ~/Downloads/testcont
-
- - name: "Delete one object in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: file1
-
- - name: "Delete several objects in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: file2,file3,file4
-
- - name: "Delete all objects in test container"
- community.general.rax_files_objects:
- container: testcont
- method: delete
-
- - name: "Upload all files to test container"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/onehundred
-
- - name: "Upload one file to test container"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/testcont/file1
-
- - name: "Upload one file to test container with metadata"
- community.general.rax_files_objects:
- container: testcont
- src: ~/Downloads/testcont/file2
- method: put
- meta:
- testkey: testdata
- who_uploaded_this: someuser@example.com
-
- - name: "Upload one file to test container with TTL of 60 seconds"
- community.general.rax_files_objects:
- container: testcont
- method: put
- src: ~/Downloads/testcont/file3
- expires: 60
-
- - name: "Attempt to get remote object that does not exist"
- community.general.rax_files_objects:
- container: testcont
- method: get
- src: FileThatDoesNotExist.jpg
- dest: ~/Downloads/testcont
- ignore_errors: yes
-
- - name: "Attempt to delete remote object that does not exist"
- community.general.rax_files_objects:
- container: testcont
- method: delete
- dest: FileThatDoesNotExist.jpg
- ignore_errors: yes
-
-- name: "Test Cloud Files Objects Metadata"
- hosts: local
- gather_facts: false
- tasks:
- - name: "Get metadata on one object"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file2
-
- - name: "Get metadata on several objects"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- src: file2,file1
-
- - name: "Set metadata on an object"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file17
- method: put
- meta:
- key1: value1
- key2: value2
- clear_meta: true
-
- - name: "Verify metadata is set"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- src: file17
-
- - name: "Delete metadata"
- community.general.rax_files_objects:
- container: testcont
- type: meta
- dest: file17
- method: delete
- meta:
- key1: ''
- key2: ''
-
- - name: "Get metadata on all objects"
- community.general.rax_files_objects:
- container: testcont
- type: meta
-'''
-
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-EXIT_DICT = dict(success=False)
-META_PREFIX = 'x-object-meta-'
-
-
-def _get_container(module, cf, container):
- try:
- return cf.get_container(container)
- except pyrax.exc.NoSuchContainer as e:
- module.fail_json(msg=e.message)
-
-
-def _upload_folder(cf, folder, container, ttl=None, headers=None):
- """ Uploads a folder to Cloud Files.
- """
- total_bytes = 0
- for root, dirs, files in os.walk(folder):
- for fname in files:
- full_path = os.path.join(root, fname)
- obj_name = os.path.relpath(full_path, folder)
- obj_size = os.path.getsize(full_path)
- cf.upload_file(container, full_path,
- obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
- total_bytes += obj_size
- return total_bytes
-
-
-def upload(module, cf, container, src, dest, meta, expires):
-    """ Uploads a single object or a folder to Cloud Files. Optionally sets
-    metadata, a TTL value (expires), or Content-Disposition and
-    Content-Encoding headers.
- """
- if not src:
- module.fail_json(msg='src must be specified when uploading')
-
- c = _get_container(module, cf, container)
- src = os.path.abspath(os.path.expanduser(src))
- is_dir = os.path.isdir(src)
-
-    if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
- module.fail_json(msg='src must be a file or a directory')
- if dest and is_dir:
- module.fail_json(msg='dest cannot be set when whole '
- 'directories are uploaded')
-
- cont_obj = None
- total_bytes = 0
- if dest and not is_dir:
- try:
- cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- elif is_dir:
- try:
- total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- try:
- cont_obj = c.upload_file(src, ttl=expires, headers=meta)
- except Exception as e:
- module.fail_json(msg=e.message)
-
- EXIT_DICT['success'] = True
- EXIT_DICT['container'] = c.name
- EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
- if cont_obj or total_bytes > 0:
- EXIT_DICT['changed'] = True
- if meta:
- EXIT_DICT['meta'] = dict(updated=True)
-
- if cont_obj:
- EXIT_DICT['bytes'] = cont_obj.total_bytes
- EXIT_DICT['etag'] = cont_obj.etag
- else:
- EXIT_DICT['bytes'] = total_bytes
-
- module.exit_json(**EXIT_DICT)
-
-
-def download(module, cf, container, src, dest, structure):
- """ Download objects from Cloud Files to a local path specified by "dest".
-    Optionally disable maintaining a directory structure by passing a
-    false value to "structure".
- """
- # Looking for an explicit destination
- if not dest:
- module.fail_json(msg='dest is a required argument when '
- 'downloading from Cloud Files')
-
- # Attempt to fetch the container by name
- c = _get_container(module, cf, container)
-
- # Accept a single object name or a comma-separated list of objs
- # If not specified, get the entire container
- if src:
-        objs = [obj.strip() for obj in src.split(',')]
- else:
- objs = c.get_object_names()
-
- dest = os.path.abspath(os.path.expanduser(dest))
- is_dir = os.path.isdir(dest)
-
- if not is_dir:
- module.fail_json(msg='dest must be a directory')
-
- results = []
- for obj in objs:
- try:
- c.download_object(obj, dest, structure=structure)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- results.append(obj)
-
- len_results = len(results)
- len_objs = len(objs)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['requested_downloaded'] = results
- if results:
- EXIT_DICT['changed'] = True
- if len_results == len_objs:
- EXIT_DICT['success'] = True
- EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
- else:
- EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
- "downloaded" % (len_results, len_objs)
- module.exit_json(**EXIT_DICT)
-
-
-def delete(module, cf, container, src, dest):
-    """ Delete specific objects by providing a single file name or a
- comma-separated list to src OR dest (but not both). Omitting file name(s)
- assumes the entire container is to be deleted.
- """
- objs = None
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
- "have been specified on both src and dest args")
- elif dest:
- objs = dest
- else:
- objs = src
-
- c = _get_container(module, cf, container)
-
- if objs:
-        objs = [obj.strip() for obj in objs.split(',')]
- else:
- objs = c.get_object_names()
-
- num_objs = len(objs)
-
- results = []
- for obj in objs:
- try:
- result = c.delete_object(obj)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- results.append(result)
-
- num_deleted = results.count(True)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['deleted'] = num_deleted
- EXIT_DICT['requested_deleted'] = objs
-
- if num_deleted:
- EXIT_DICT['changed'] = True
-
- if num_objs == num_deleted:
- EXIT_DICT['success'] = True
- EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
- else:
- EXIT_DICT['msg'] = ("Error: only %s of %s objects "
- "deleted" % (num_deleted, num_objs))
- module.exit_json(**EXIT_DICT)
-
-
-def get_meta(module, cf, container, src, dest):
- """ Get metadata for a single file, comma-separated list, or entire
- container
- """
- c = _get_container(module, cf, container)
-
- objs = None
- if src and dest:
-        module.fail_json(msg="Error: ambiguous instructions; objects to fetch "
-                             "metadata for have been specified on both src and dest args")
- elif dest:
- objs = dest
- else:
- objs = src
-
- if objs:
- objs = objs.split(',')
- objs = map(str.strip, objs)
- else:
- objs = c.get_object_names()
-
- results = dict()
- for obj in objs:
- try:
- meta = c.get_object(obj).get_metadata()
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- results[obj] = dict()
- for k, v in meta.items():
- meta_key = k.split(META_PREFIX)[-1]
- results[obj][meta_key] = v
-
- EXIT_DICT['container'] = c.name
- if results:
- EXIT_DICT['meta_results'] = results
- EXIT_DICT['success'] = True
- module.exit_json(**EXIT_DICT)
-
-
-def put_meta(module, cf, container, src, dest, meta, clear_meta):
- """ Set metadata on a container, single file, or comma-separated list.
- Passing a true value to clear_meta clears the metadata stored in Cloud
- Files before setting the new metadata to the value of "meta".
- """
- objs = None
- if src and dest:
-        module.fail_json(msg="Error: ambiguous instructions; files to set metadata on"
- " have been specified on both src and dest args")
- elif dest:
- objs = dest
- else:
- objs = src
-
- objs = objs.split(',')
- objs = map(str.strip, objs)
-
- c = _get_container(module, cf, container)
-
- results = []
- for obj in objs:
- try:
- result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- results.append(result)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['success'] = True
- if results:
- EXIT_DICT['changed'] = True
-        EXIT_DICT['num_changed'] = len(results)
- module.exit_json(**EXIT_DICT)
-
-
-def delete_meta(module, cf, container, src, dest, meta):
- """ Removes metadata keys and values specified in meta, if any. Deletes on
- all objects specified by src or dest (but not both), if any; otherwise it
- deletes keys on all objects in the container
- """
- objs = None
- if src and dest:
- module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
- "deleted have been specified on both src and dest"
- " args")
- elif dest:
- objs = dest
- else:
- objs = src
-
-    c = _get_container(module, cf, container)
-
-    if objs:
-        objs = [obj.strip() for obj in objs.split(',')]
-    else:
-        objs = c.get_object_names()
-
- results = [] # Num of metadata keys removed, not objects affected
- for obj in objs:
- if meta:
- for k, v in meta.items():
- try:
- result = c.get_object(obj).remove_metadata_key(k)
- except Exception as e:
- module.fail_json(msg=e.message)
- else:
- results.append(result)
- else:
- try:
- o = c.get_object(obj)
- except pyrax.exc.NoSuchObject as e:
- module.fail_json(msg=e.message)
-
- for k, v in o.get_metadata().items():
- try:
- result = o.remove_metadata_key(k)
- except Exception as e:
- module.fail_json(msg=e.message)
- results.append(result)
-
- EXIT_DICT['container'] = c.name
- EXIT_DICT['success'] = True
- if results:
- EXIT_DICT['changed'] = True
- EXIT_DICT['num_deleted'] = len(results)
- module.exit_json(**EXIT_DICT)
-
-
-def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
- structure, expires):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
-
- if cf is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if typ == "file":
- if method == 'put':
- upload(module, cf, container, src, dest, meta, expires)
-
- elif method == 'get':
- download(module, cf, container, src, dest, structure)
-
- elif method == 'delete':
- delete(module, cf, container, src, dest)
-
- else:
- if method == 'get':
- get_meta(module, cf, container, src, dest)
-
- if method == 'put':
- put_meta(module, cf, container, src, dest, meta, clear_meta)
-
- if method == 'delete':
- delete_meta(module, cf, container, src, dest, meta)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- container=dict(required=True),
- src=dict(),
- dest=dict(),
- method=dict(default='get', choices=['put', 'get', 'delete']),
- type=dict(default='file', choices=['file', 'meta']),
- meta=dict(type='dict', default=dict()),
- clear_meta=dict(default=False, type='bool'),
- structure=dict(default=True, type='bool'),
- expires=dict(type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- container = module.params.get('container')
- src = module.params.get('src')
- dest = module.params.get('dest')
- method = module.params.get('method')
- typ = module.params.get('type')
- meta = module.params.get('meta')
- clear_meta = module.params.get('clear_meta')
- structure = module.params.get('structure')
- expires = module.params.get('expires')
-
-    if clear_meta and typ != 'meta':
- module.fail_json(msg='clear_meta can only be used when setting metadata')
-
- setup_rax_module(module, pyrax)
- cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
-
-
-if __name__ == '__main__':
- main()
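
The comma-separated src/dest handling above is easy to get wrong in Python 3, where map() returns a single-use iterator without len(); building a real list sidesteps both problems. A self-contained sketch of the parsing plus the container-wide fallback the module uses (resolve_objects is a hypothetical name; get_object_names is the pyrax container call used above):

    def resolve_objects(spec, container):
        # Parse "file1, file2,file17" into clean names, or fall back to
        # every object in the container when no names were given.
        if spec:
            return [name.strip() for name in spec.split(',') if name.strip()]
        return container.get_object_names()
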
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
deleted file mode 100644
index 2021052f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_identity.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_identity
-short_description: Load Rackspace Cloud Identity
-description:
- - Verifies Rackspace Cloud credentials and returns identity information
-options:
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices: ['present']
- default: present
- required: false
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Load Rackspace Cloud Identity
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Load Identity
- local_action:
- module: rax_identity
- credentials: ~/.raxpub
- region: DFW
- register: rackspace_identity
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
- setup_rax_module)
-
-
-def cloud_identity(module, state, identity):
- instance = dict(
- authenticated=identity.authenticated,
- credentials=identity._creds_file
- )
- changed = False
-
- instance.update(rax_to_dict(identity))
-    instance['services'] = list(instance.get('services', {}).keys())
-
- if state == 'present':
- if not identity.authenticated:
- module.fail_json(msg='Credentials could not be verified!')
-
- module.exit_json(changed=changed, identity=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- if not pyrax.identity:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- cloud_identity(module, state, pyrax.identity)
-
-
-if __name__ == '__main__':
- main()
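
For anyone replacing this module with direct pyrax calls, the whole module reduces to one authentication check. A sketch assuming the long-unmaintained pyrax library still exposes set_credential_file and the identity object relied on above (verify_rax_credentials is a hypothetical name):

    import os

    import pyrax


    def verify_rax_credentials(creds_file='~/.raxpub'):
        # Authenticate and fail loudly, like the module's
        # 'Credentials could not be verified!' path.
        pyrax.set_setting('identity_type', 'rackspace')
        pyrax.set_credential_file(os.path.expanduser(creds_file))
        if not pyrax.identity or not pyrax.identity.authenticated:
            raise RuntimeError('Credentials could not be verified!')
        return pyrax.identity
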
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
deleted file mode 100644
index 90b0183e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_keypair.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_keypair
-short_description: Create a keypair for use with Rackspace Cloud Servers
-description:
- - Create a keypair for use with Rackspace Cloud Servers
-options:
- name:
- type: str
- description:
- - Name of keypair
- required: true
- public_key:
- type: str
- description:
- - Public Key string to upload. Can be a file path or string
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author: "Matt Martz (@sivel)"
-notes:
- - Keypairs cannot be manipulated, only created and deleted. To "update" a
- keypair you must first delete and then recreate.
- - The ability to specify a file path for the public key was added in 1.7
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Create a keypair
- hosts: localhost
- gather_facts: False
- tasks:
- - name: Keypair request
- local_action:
- module: rax_keypair
- credentials: ~/.raxpub
- name: my_keypair
- region: DFW
- register: keypair
- - name: Create local public key
- local_action:
- module: copy
- content: "{{ keypair.keypair.public_key }}"
- dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- - name: Create local private key
- local_action:
- module: copy
- content: "{{ keypair.keypair.private_key }}"
- dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
-
-- name: Create a keypair
- hosts: localhost
- gather_facts: False
- tasks:
- - name: Keypair request
- local_action:
- module: rax_keypair
- credentials: ~/.raxpub
- name: my_keypair
- public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
- region: DFW
- register: keypair
-'''
-import os
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
- rax_required_together,
- rax_to_dict,
- setup_rax_module,
- )
-
-
-def rax_keypair(module, name, public_key, state):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- keypair = {}
-
- if state == 'present':
- if public_key and os.path.isfile(public_key):
-            try:
-                with open(public_key) as f:
-                    public_key = f.read()
-            except Exception as e:
-                module.fail_json(msg='Failed to load %s: %s' % (public_key, e))
-
- try:
- keypair = cs.keypairs.find(name=name)
- except cs.exceptions.NotFound:
- try:
- keypair = cs.keypairs.create(name, public_key)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- elif state == 'absent':
- try:
- keypair = cs.keypairs.find(name=name)
- except Exception:
- pass
-
- if keypair:
- try:
- keypair.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
-
- module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(required=True),
- public_key=dict(),
- state=dict(default='present', choices=['absent', 'present']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- public_key = module.params.get('public_key')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- rax_keypair(module, name, public_key, state)
-
-
-if __name__ == '__main__':
- main()
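
The keypair handling above is a plain find-or-create; because keypairs cannot be changed in place, any "update" means delete and recreate. A condensed sketch using the same pyrax calls, assuming pyrax is already authenticated (ensure_keypair is a hypothetical name):

    import os

    import pyrax


    def ensure_keypair(name, public_key=None):
        # Returns (keypair, changed). public_key may be raw key material
        # or a path to a .pub file, as in the module above.
        cs = pyrax.cloudservers
        if public_key and os.path.isfile(os.path.expanduser(public_key)):
            with open(os.path.expanduser(public_key)) as f:
                public_key = f.read()
        try:
            return cs.keypairs.find(name=name), False
        except cs.exceptions.NotFound:
            return cs.keypairs.create(name, public_key), True
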
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
deleted file mode 100644
index 3504181f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_meta.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_meta
-short_description: Manipulate metadata for Rackspace Cloud Servers
-description:
- - Manipulate metadata for Rackspace Cloud Servers
-options:
- address:
- type: str
- description:
-      - Server IP address to modify metadata for; matches any IP assigned to
-        the server
- id:
- type: str
- description:
- - Server ID to modify metadata for
- name:
- type: str
- description:
- - Server name to modify metadata for
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the instance
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Set metadata for a server
- hosts: all
- gather_facts: False
- tasks:
- - name: Set metadata
- local_action:
- module: rax_meta
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
- meta:
- group: primary_group
- groups:
- - group_two
- - group_three
- app: my_app
-
- - name: Clear metadata
- local_action:
- module: rax_meta
- credentials: ~/.raxpub
- name: "{{ inventory_hostname }}"
- region: DFW
-'''
-
-import json
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-from ansible.module_utils.six import string_types
-
-
-def rax_meta(module, address, name, server_id, meta):
- changed = False
-
- cs = pyrax.cloudservers
-
- if cs is None:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- search_opts = {}
- if name:
- search_opts = dict(name='^%s$' % name)
- try:
- servers = cs.servers.list(search_opts=search_opts)
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif address:
- servers = []
- try:
- for server in cs.servers.list():
- for addresses in server.networks.values():
- if address in addresses:
- servers.append(server)
- break
- except Exception as e:
- module.fail_json(msg='%s' % e.message)
- elif server_id:
- servers = []
- try:
- servers.append(cs.servers.get(server_id))
-        except Exception:
- pass
-
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers found matching provided '
- 'search parameters')
- elif not servers:
- module.fail_json(msg='Failed to find a server matching provided '
- 'search parameters')
-
- # Normalize and ensure all metadata values are strings
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- server = servers[0]
- if server.metadata == meta:
- changed = False
- else:
- changed = True
- removed = set(server.metadata.keys()).difference(meta.keys())
- cs.servers.delete_meta(server, list(removed))
- cs.servers.set_meta(server, meta)
- server.get()
-
- module.exit_json(changed=changed, meta=server.metadata)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- address=dict(),
- id=dict(),
- name=dict(),
- meta=dict(type='dict', default=dict()),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[['address', 'id', 'name']],
- required_one_of=[['address', 'id', 'name']],
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- address = module.params.get('address')
- server_id = module.params.get('id')
- name = module.params.get('name')
- meta = module.params.get('meta')
-
- setup_rax_module(module, pyrax)
-
- rax_meta(module, address, name, server_id, meta)
-
-
-if __name__ == '__main__':
- main()
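
The value coercion above exists because the Cloud Servers metadata API only accepts string values. A standalone version of the same normalization (normalize_meta is a hypothetical name):

    import json


    def normalize_meta(meta):
        # Lists become comma-joined strings, dicts become JSON, and
        # everything else is coerced with str(), exactly as above.
        normalized = {}
        for key, value in meta.items():
            if isinstance(value, list):
                normalized[key] = ','.join(str(item) for item in value)
            elif isinstance(value, dict):
                normalized[key] = json.dumps(value)
            elif isinstance(value, str):
                normalized[key] = value
            else:
                normalized[key] = str(value)
        return normalized

    # normalize_meta({'groups': ['a', 'b'], 'n': 3})
    # -> {'groups': 'a,b', 'n': '3'}
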
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
deleted file mode 100644
index 7e99db3f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_alarm.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_alarm
-short_description: Create or delete a Rackspace Cloud Monitoring alarm.
-description:
-- Create or delete a Rackspace Cloud Monitoring alarm that associates an
- existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
- criteria that specify what conditions will trigger which levels of
- notifications. Rackspace monitoring module flow | rax_mon_entity ->
- rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
- *rax_mon_alarm*
-options:
- state:
- type: str
- description:
- - Ensure that the alarm with this C(label) exists or does not exist.
- choices: [ "present", "absent" ]
- required: false
- default: present
- label:
- type: str
- description:
- - Friendly name for this alarm, used to achieve idempotence. Must be a String
- between 1 and 255 characters long.
- required: true
- entity_id:
- type: str
- description:
- - ID of the entity this alarm is attached to. May be acquired by registering
- the value of a rax_mon_entity task.
- required: true
- check_id:
- type: str
- description:
- - ID of the check that should be alerted on. May be acquired by registering
- the value of a rax_mon_check task.
- required: true
- notification_plan_id:
- type: str
- description:
- - ID of the notification plan to trigger if this alarm fires. May be acquired
- by registering the value of a rax_mon_notification_plan task.
- required: true
- criteria:
- type: str
- description:
- - Alarm DSL that describes alerting conditions and their output states. Must
- be between 1 and 16384 characters long. See
- http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
- for a reference on the alerting language.
- disabled:
- description:
- - If yes, create this alarm, but leave it in an inactive state. Defaults to
- no.
- type: bool
- default: false
- metadata:
- type: dict
- description:
- - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
- keys and values between 1 and 255 characters long.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Alarm example
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Ensure that a specific alarm exists.
- community.general.rax_mon_alarm:
- credentials: ~/.rax_pub
- state: present
- label: uhoh
- entity_id: "{{ the_entity['entity']['id'] }}"
- check_id: "{{ the_check['check']['id'] }}"
- notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
- criteria: >
- if (rate(metric['average']) > 10) {
- return new AlarmStatus(WARNING);
- }
- return new AlarmStatus(OK);
- register: the_alarm
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
- disabled, metadata):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
-    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
- module.fail_json(msg='criteria must be between 1 and 16384 characters long')
-
- # Coerce attributes.
-
- changed = False
- alarm = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
-
- if existing:
- alarm = existing[0]
-
- if state == 'present':
- should_create = False
- should_update = False
- should_delete = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing alarms have the label %s.' %
- (len(existing), label))
-
- if alarm:
- if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
- should_delete = should_create = True
-
- should_update = (disabled and disabled != alarm.disabled) or \
- (metadata and metadata != alarm.metadata) or \
- (criteria and criteria != alarm.criteria)
-
- if should_update and not should_delete:
- cm.update_alarm(entity=entity_id, alarm=alarm,
- criteria=criteria, disabled=disabled,
- label=label, metadata=metadata)
- changed = True
-
- if should_delete:
- alarm.delete()
- changed = True
- else:
- should_create = True
-
- if should_create:
- alarm = cm.create_alarm(entity=entity_id, check=check_id,
- notification_plan=notification_plan_id,
- criteria=criteria, disabled=disabled, label=label,
- metadata=metadata)
- changed = True
- else:
- for a in existing:
- a.delete()
- changed = True
-
- if alarm:
- alarm_dict = {
- "id": alarm.id,
- "label": alarm.label,
- "check_id": alarm.check_id,
- "notification_plan_id": alarm.notification_plan_id,
- "criteria": alarm.criteria,
- "disabled": alarm.disabled,
- "metadata": alarm.metadata
- }
- module.exit_json(changed=changed, alarm=alarm_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- entity_id=dict(required=True),
- check_id=dict(required=True),
- notification_plan_id=dict(required=True),
- criteria=dict(),
- disabled=dict(type='bool', default=False),
- metadata=dict(type='dict')
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
- label = module.params.get('label')
- entity_id = module.params.get('entity_id')
- check_id = module.params.get('check_id')
- notification_plan_id = module.params.get('notification_plan_id')
- criteria = module.params.get('criteria')
- disabled = module.boolean(module.params.get('disabled'))
- metadata = module.params.get('metadata')
-
- setup_rax_module(module, pyrax)
-
- alarm(module, state, label, entity_id, check_id, notification_plan_id,
- criteria, disabled, metadata)
-
-
-if __name__ == '__main__':
- main()
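
The reconcile branch above compresses into three decisions: recreate when an identity field (check or notification plan) changes, update in place for the mutable fields, otherwise do nothing. A sketch of that decision table (plan_alarm_changes is a hypothetical name; existing is a pyrax alarm object or None):

    def plan_alarm_changes(existing, check_id, plan_id, criteria, disabled, metadata):
        # Returns (create, update, delete) booleans, mirroring the logic above.
        if existing is None:
            return True, False, False
        recreate = (check_id != existing.check_id or
                    plan_id != existing.notification_plan_id)
        update = not recreate and any([
            disabled and disabled != existing.disabled,
            metadata and metadata != existing.metadata,
            criteria and criteria != existing.criteria,
        ])
        return recreate, update, recreate
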
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
deleted file mode 100644
index 17a3932f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_check.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_check
-short_description: Create or delete a Rackspace Cloud Monitoring check for an
- existing entity.
-description:
-- Create or delete a Rackspace Cloud Monitoring check associated with an
- existing rax_mon_entity. A check is a specific test or measurement that is
- performed, possibly from different monitoring zones, on the systems you
- monitor. Rackspace monitoring module flow | rax_mon_entity ->
- *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
- rax_mon_alarm
-options:
- state:
- type: str
- description:
- - Ensure that a check with this C(label) exists or does not exist.
- choices: ["present", "absent"]
- default: present
- entity_id:
- type: str
- description:
- - ID of the rax_mon_entity to target with this check.
- required: true
- label:
- type: str
- description:
- - Defines a label for this check, between 1 and 64 characters long.
- required: true
- check_type:
- type: str
- description:
- - The type of check to create. C(remote.) checks may be created on any
- rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
- that have a non-null C(agent_id).
- - |
- Choices for this option are:
- - C(remote.dns)
- - C(remote.ftp-banner)
- - C(remote.http)
- - C(remote.imap-banner)
- - C(remote.mssql-banner)
- - C(remote.mysql-banner)
- - C(remote.ping)
- - C(remote.pop3-banner)
- - C(remote.postgresql-banner)
- - C(remote.smtp-banner)
- - C(remote.smtp)
- - C(remote.ssh)
- - C(remote.tcp)
- - C(remote.telnet-banner)
- - C(agent.filesystem)
- - C(agent.memory)
- - C(agent.load_average)
- - C(agent.cpu)
- - C(agent.disk)
- - C(agent.network)
- - C(agent.plugin)
- required: true
- monitoring_zones_poll:
- type: str
- description:
- - Comma-separated list of the names of the monitoring zones the check should
- run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
- mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
- target_hostname:
- type: str
- description:
- - One of `target_hostname` and `target_alias` is required for remote.* checks,
- but prohibited for agent.* checks. The hostname this check should target.
- Must be a valid IPv4, IPv6, or FQDN.
- target_alias:
- type: str
- description:
- - One of `target_alias` and `target_hostname` is required for remote.* checks,
- but prohibited for agent.* checks. Use the corresponding key in the entity's
- `ip_addresses` hash to resolve an IP address to target.
- details:
- type: dict
- description:
- - Additional details specific to the check type. Must be a hash of strings
- between 1 and 255 characters long, or an array or object containing 0 to
- 256 items.
- disabled:
- description:
- - If "yes", ensure the check is created, but don't actually use it yet.
- type: bool
- default: false
- metadata:
- type: dict
- description:
- - Hash of arbitrary key-value pairs to accompany this check if it fires.
- Keys and values must be strings between 1 and 255 characters long.
- period:
- type: int
- description:
- - The number of seconds between each time the check is performed. Must be
- greater than the minimum period set on your account.
- timeout:
- type: int
- description:
- - The number of seconds this check will wait when attempting to collect
- results. Must be less than the period.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Create a monitoring check
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Associate a check with an existing entity.
- community.general.rax_mon_check:
- credentials: ~/.rax_pub
- state: present
- entity_id: "{{ the_entity['entity']['id'] }}"
- label: the_check
- check_type: remote.ping
- monitoring_zones_poll: mziad,mzord,mzdfw
- details:
- count: 10
-        metadata:
- hurf: durf
- register: the_check
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_check(module, state, entity_id, label, check_type,
- monitoring_zones_poll, target_hostname, target_alias, details,
- disabled, metadata, period, timeout):
-
- # Coerce attributes.
-
-    if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
-        monitoring_zones_poll = [zone.strip() for zone in
-                                 monitoring_zones_poll.split(',')]
-
- if period:
- period = int(period)
-
- if timeout:
- timeout = int(timeout)
-
- changed = False
- check = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- entity = cm.get_entity(entity_id)
- if not entity:
- module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
- ' a valid entity id.' % entity_id)
-
- existing = [e for e in entity.list_checks() if e.label == label]
-
- if existing:
- check = existing[0]
-
- if state == 'present':
- if len(existing) > 1:
- module.fail_json(msg='%s existing checks have a label of %s.' %
- (len(existing), label))
-
- should_delete = False
- should_create = False
- should_update = False
-
- if check:
- # Details may include keys set to default values that are not
- # included in the initial creation.
- #
- # Only force a recreation of the check if one of the *specified*
- # keys is missing or has a different value.
- if details:
- for (key, value) in details.items():
- if key not in check.details:
- should_delete = should_create = True
- elif value != check.details[key]:
- should_delete = should_create = True
-
- should_update = label != check.label or \
- (target_hostname and target_hostname != check.target_hostname) or \
- (target_alias and target_alias != check.target_alias) or \
- (disabled != check.disabled) or \
- (metadata and metadata != check.metadata) or \
- (period and period != check.period) or \
- (timeout and timeout != check.timeout) or \
- (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
-
- if should_update and not should_delete:
- check.update(label=label,
- disabled=disabled,
- metadata=metadata,
- monitoring_zones_poll=monitoring_zones_poll,
- timeout=timeout,
- period=period,
- target_alias=target_alias,
- target_hostname=target_hostname)
- changed = True
- else:
- # The check doesn't exist yet.
- should_create = True
-
- if should_delete:
- check.delete()
-
- if should_create:
- check = cm.create_check(entity,
- label=label,
- check_type=check_type,
- target_hostname=target_hostname,
- target_alias=target_alias,
- monitoring_zones_poll=monitoring_zones_poll,
- details=details,
- disabled=disabled,
- metadata=metadata,
- period=period,
- timeout=timeout)
- changed = True
- elif state == 'absent':
- if check:
- check.delete()
- changed = True
- else:
- module.fail_json(msg='state must be either present or absent.')
-
- if check:
- check_dict = {
- "id": check.id,
- "label": check.label,
- "type": check.type,
- "target_hostname": check.target_hostname,
- "target_alias": check.target_alias,
- "monitoring_zones_poll": check.monitoring_zones_poll,
- "details": check.details,
- "disabled": check.disabled,
- "metadata": check.metadata,
- "period": check.period,
- "timeout": check.timeout
- }
- module.exit_json(changed=changed, check=check_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- entity_id=dict(required=True),
- label=dict(required=True),
- check_type=dict(required=True),
- monitoring_zones_poll=dict(),
- target_hostname=dict(),
- target_alias=dict(),
- details=dict(type='dict', default={}),
- disabled=dict(type='bool', default=False),
- metadata=dict(type='dict', default={}),
- period=dict(type='int'),
- timeout=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent'])
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- entity_id = module.params.get('entity_id')
- label = module.params.get('label')
- check_type = module.params.get('check_type')
- monitoring_zones_poll = module.params.get('monitoring_zones_poll')
- target_hostname = module.params.get('target_hostname')
- target_alias = module.params.get('target_alias')
- details = module.params.get('details')
- disabled = module.boolean(module.params.get('disabled'))
- metadata = module.params.get('metadata')
- period = module.params.get('period')
- timeout = module.params.get('timeout')
-
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- cloud_check(module, state, entity_id, label, check_type,
- monitoring_zones_poll, target_hostname, target_alias, details,
- disabled, metadata, period, timeout)
-
-
-if __name__ == '__main__':
- main()
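
One subtlety above deserves emphasis: the API backfills default keys into details, so diffing the full dicts would recreate the check on every run; only the keys the playbook actually specified are compared. A standalone sketch of that comparison (details_force_recreate is a hypothetical name):

    def details_force_recreate(specified, current):
        # True when any key the playbook set is missing or different in the
        # existing check; server-added defaults in `current` are ignored.
        if not specified:
            return False
        return any(key not in current or current[key] != value
                   for key, value in specified.items())

    # details_force_recreate({'count': 10}, {'count': 10, 'timeout': 9}) -> False
    # details_force_recreate({'count': 5}, {'count': 10}) -> True
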
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
deleted file mode 100644
index 2f8cdeef..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_entity.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_entity
-short_description: Create or delete a Rackspace Cloud Monitoring entity
-description:
-- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
- to monitor. Entities associate checks and alarms with a target system and
- provide a convenient, centralized place to store IP addresses. Rackspace
- monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
- rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
-options:
- label:
- type: str
- description:
- - Defines a name for this entity. Must be a non-empty string between 1 and
- 255 characters long.
- required: true
- state:
- type: str
- description:
- - Ensure that an entity with this C(name) exists or does not exist.
- choices: ["present", "absent"]
- default: present
- agent_id:
- type: str
- description:
- - Rackspace monitoring agent on the target device to which this entity is
- bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
- named_ip_addresses:
- type: dict
- description:
- - Hash of IP addresses that may be referenced by name by rax_mon_checks
-        added to this entity. Must be a dictionary with keys that are names
- between 1 and 64 characters long, and values that are valid IPv4 or IPv6
- addresses.
- metadata:
- type: dict
- description:
- - Hash of arbitrary C(name), C(value) pairs that are passed to associated
- rax_mon_alarms. Names and values must all be between 1 and 255 characters
- long.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Entity example
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Ensure an entity exists
- community.general.rax_mon_entity:
- credentials: ~/.rax_pub
- state: present
- label: my_entity
- named_ip_addresses:
- web_box: 192.0.2.4
- db_box: 192.0.2.5
-        metadata:
- hurf: durf
- register: the_entity
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
- metadata):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for entity in cm.list_entities():
- if label == entity.label:
- existing.append(entity)
-
- entity = None
-
- if existing:
- entity = existing[0]
-
- if state == 'present':
- should_update = False
- should_delete = False
- should_create = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing entities have the label %s.' %
- (len(existing), label))
-
- if entity:
- if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
- should_delete = should_create = True
-
- # Change an existing Entity, unless there's nothing to do.
-            should_update = (agent_id and agent_id != entity.agent_id) or \
- (metadata and metadata != entity.metadata)
-
- if should_update and not should_delete:
- entity.update(agent_id, metadata)
- changed = True
-
- if should_delete:
- entity.delete()
- else:
- should_create = True
-
- if should_create:
- # Create a new Entity.
- entity = cm.create_entity(label=label, agent=agent_id,
- ip_addresses=named_ip_addresses,
- metadata=metadata)
- changed = True
- else:
- # Delete the existing Entities.
- for e in existing:
- e.delete()
- changed = True
-
- if entity:
- entity_dict = {
- "id": entity.id,
- "name": entity.name,
- "agent_id": entity.agent_id,
- }
- module.exit_json(changed=changed, entity=entity_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- agent_id=dict(),
- named_ip_addresses=dict(type='dict', default={}),
- metadata=dict(type='dict', default={})
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- agent_id = module.params.get('agent_id')
- named_ip_addresses = module.params.get('named_ip_addresses')
- metadata = module.params.get('metadata')
-
- setup_rax_module(module, pyrax)
-
- cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
-
-
-if __name__ == '__main__':
- main()
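
All of these monitoring modules use the same label-based idempotence: list everything, keep the unique match, fail on duplicates. A generic sketch of that lookup (find_by_label is a hypothetical name; items are assumed to carry a .label attribute):

    def find_by_label(items, label):
        # Return the unique item with this label, or None; duplicates are an
        # error, matching the fail_json branches above.
        matches = [item for item in items if item.label == label]
        if len(matches) > 1:
            raise LookupError('%d existing items have the label %s'
                              % (len(matches), label))
        return matches[0] if matches else None

    # entity = find_by_label(pyrax.cloud_monitoring.list_entities(), 'my_entity')
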
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
deleted file mode 100644
index fb645c30..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_notification
-short_description: Create or delete a Rackspace Cloud Monitoring notification.
-description:
-- Create or delete a Rackspace Cloud Monitoring notification that specifies a
- channel that can be used to communicate alarms, such as email, webhooks, or
- PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
- *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
-options:
- state:
- type: str
- description:
- - Ensure that the notification with this C(label) exists or does not exist.
- choices: ['present', 'absent']
- default: present
- label:
- type: str
- description:
- - Defines a friendly name for this notification. String between 1 and 255
- characters long.
- required: true
- notification_type:
- type: str
- description:
- - A supported notification type.
- choices: ["webhook", "email", "pagerduty"]
- required: true
- details:
- type: dict
- description:
- - Dictionary of key-value pairs used to initialize the notification.
- Required keys and meanings vary with notification type. See
- http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
- service-notification-types-crud.html for details.
- required: true
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Monitoring notification example
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Email me when something goes wrong.
-    community.general.rax_mon_notification:
- credentials: ~/.rax_pub
- label: omg
-      notification_type: email
- details:
- address: me@mailhost.com
- register: the_notification
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def notification(module, state, label, notification_type, details):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
- notification = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for n in cm.list_notifications():
- if n.label == label:
- existing.append(n)
-
- if existing:
- notification = existing[0]
-
- if state == 'present':
- should_update = False
- should_delete = False
- should_create = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s existing notifications are labelled %s.' %
- (len(existing), label))
-
- if notification:
- should_delete = (notification_type != notification.type)
-
- should_update = (details != notification.details)
-
- if should_update and not should_delete:
-                notification.update(details=details)
- changed = True
-
- if should_delete:
-                notification.delete()
-                should_create = True
- else:
- should_create = True
-
- if should_create:
- notification = cm.create_notification(notification_type,
- label=label, details=details)
- changed = True
- else:
- for n in existing:
- n.delete()
- changed = True
-
- if notification:
- notification_dict = {
- "id": notification.id,
- "type": notification.type,
- "label": notification.label,
- "details": notification.details
- }
- module.exit_json(changed=changed, notification=notification_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
- details=dict(required=True, type='dict')
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- notification_type = module.params.get('notification_type')
- details = module.params.get('details')
-
- setup_rax_module(module, pyrax)
-
- notification(module, state, label, notification_type, details)
-
-
-if __name__ == '__main__':
- main()
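
The core of the module above is a create-or-replace: a notification type change forces delete-and-recreate, while a details change is pushed in place. A sketch assuming cm is pyrax.cloud_monitoring and behaves as the code above expects (ensure_notification is a hypothetical name):

    def ensure_notification(cm, label, notification_type, details):
        # Returns (notification, changed). A type change forces
        # delete-and-recreate; a details change updates in place.
        existing = [n for n in cm.list_notifications() if n.label == label]
        current = existing[0] if existing else None
        if current and current.type == notification_type:
            if current.details != details:
                current.update(details=details)
                return current, True
            return current, False
        if current:
            current.delete()
        return cm.create_notification(notification_type, label=label,
                                      details=details), True
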
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
deleted file mode 100644
index 25e50682..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_mon_notification_plan.py
+++ /dev/null
@@ -1,184 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_mon_notification_plan
-short_description: Create or delete a Rackspace Cloud Monitoring notification
- plan.
-description:
-- Create or delete a Rackspace Cloud Monitoring notification plan by
- associating existing rax_mon_notifications with severity levels. Rackspace
- monitoring module flow | rax_mon_entity -> rax_mon_check ->
- rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
-options:
- state:
- type: str
- description:
- - Ensure that the notification plan with this C(label) exists or does not
- exist.
- choices: ['present', 'absent']
- default: present
- label:
- type: str
- description:
- - Defines a friendly name for this notification plan. String between 1 and
- 255 characters long.
- required: true
- critical_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is CRITICAL. Must be an
- array of valid rax_mon_notification ids.
- warning_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is WARNING. Must be an array
- of valid rax_mon_notification ids.
- ok_state:
- type: list
- elements: str
- description:
- - Notification list to use when the alarm state is OK. Must be an array of
- valid rax_mon_notification ids.
-author: Ash Wilson (@smashwilson)
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Example notification plan
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Establish who gets called when.
- community.general.rax_mon_notification_plan:
- credentials: ~/.rax_pub
- state: present
- label: defcon1
- critical_state:
- - "{{ everyone['notification']['id'] }}"
- warning_state:
- - "{{ opsfloor['notification']['id'] }}"
- register: defcon1
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def notification_plan(module, state, label, critical_state, warning_state, ok_state):
-
- if len(label) < 1 or len(label) > 255:
- module.fail_json(msg='label must be between 1 and 255 characters long')
-
- changed = False
- notification_plan = None
-
- cm = pyrax.cloud_monitoring
- if not cm:
- module.fail_json(msg='Failed to instantiate client. This typically '
- 'indicates an invalid region or an incorrectly '
- 'capitalized region name.')
-
- existing = []
- for n in cm.list_notification_plans():
- if n.label == label:
- existing.append(n)
-
- if existing:
- notification_plan = existing[0]
-
- if state == 'present':
- should_create = False
- should_delete = False
-
- if len(existing) > 1:
- module.fail_json(msg='%s notification plans are labelled %s.' %
- (len(existing), label))
-
- if notification_plan:
- should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
- (warning_state and warning_state != notification_plan.warning_state) or \
- (ok_state and ok_state != notification_plan.ok_state)
-
- if should_delete:
- notification_plan.delete()
- should_create = True
- else:
- should_create = True
-
- if should_create:
- notification_plan = cm.create_notification_plan(label=label,
- critical_state=critical_state,
- warning_state=warning_state,
- ok_state=ok_state)
- changed = True
- else:
- for np in existing:
- np.delete()
- changed = True
-
- if notification_plan:
- notification_plan_dict = {
- "id": notification_plan.id,
- "critical_state": notification_plan.critical_state,
- "warning_state": notification_plan.warning_state,
- "ok_state": notification_plan.ok_state,
- "metadata": notification_plan.metadata
- }
- module.exit_json(changed=changed, notification_plan=notification_plan_dict)
- else:
- module.exit_json(changed=changed)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent']),
- label=dict(required=True),
- critical_state=dict(type='list', elements='str'),
- warning_state=dict(type='list', elements='str'),
- ok_state=dict(type='list', elements='str'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
-
- label = module.params.get('label')
- critical_state = module.params.get('critical_state')
- warning_state = module.params.get('warning_state')
- ok_state = module.params.get('ok_state')
-
- setup_rax_module(module, pyrax)
-
- notification_plan(module, state, label, critical_state, warning_state, ok_state)
-
-
-if __name__ == '__main__':
- main()
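
Note on the module removed above: rax_mon_notification_plan reconciles by replace-on-drift. Whenever any wished state list differs from the live plan, the plan is deleted and recreated rather than patched in place. A minimal sketch of that comparison, with illustrative names (plan_needs_replacement, existing, desired are not part of the module):

def plan_needs_replacement(existing, desired):
    """True when any desired, non-empty state list differs from the live plan."""
    for attr in ('critical_state', 'warning_state', 'ok_state'):
        wanted = desired.get(attr)
        if wanted and wanted != getattr(existing, attr):
            return True
    return False

When this returns True the module deletes and recreates the plan, which is why a run that changes any state list always reports changed=True.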
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
deleted file mode 100644
index 146c08c8..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_network.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_network
-short_description: create / delete an isolated network in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud isolated network.
-options:
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- label:
- type: str
- description:
- - Label (name) to give the network
- required: yes
- cidr:
- type: str
- description:
- - cidr of the network being created
-author:
- - "Christopher H. Laco (@claco)"
- - "Jesse Keating (@omgjlk)"
-extends_documentation_fragment:
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build an Isolated Network
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Network create request
- local_action:
- module: rax_network
- credentials: ~/.raxpub
- label: my-net
- cidr: 192.168.3.0/24
- state: present
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_network(module, state, label, cidr):
- changed = False
- network = None
- networks = []
-
- if not pyrax.cloud_networks:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if state == 'present':
- if not cidr:
- module.fail_json(msg='missing required arguments: cidr')
-
- try:
- network = pyrax.cloud_networks.find_network_by_label(label)
- except pyrax.exceptions.NetworkNotFound:
- try:
- network = pyrax.cloud_networks.create(label, cidr=cidr)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- elif state == 'absent':
- try:
- network = pyrax.cloud_networks.find_network_by_label(label)
- network.delete()
- changed = True
- except pyrax.exceptions.NetworkNotFound:
- pass
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- if network:
- instance = dict(id=network.id,
- label=network.label,
- cidr=network.cidr)
- networks.append(instance)
-
- module.exit_json(changed=changed, networks=networks)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present',
- choices=['present', 'absent']),
- label=dict(required=True),
- cidr=dict()
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- state = module.params.get('state')
- label = module.params.get('label')
- cidr = module.params.get('cidr')
-
- setup_rax_module(module, pyrax)
-
- cloud_network(module, state, label, cidr)
-
-
-if __name__ == '__main__':
- main()
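
Note on the module removed above: rax_network is a find-or-create module. It looks the network up by label and only creates it when pyrax raises NetworkNotFound, so reruns report changed=False. A minimal sketch of the idiom, assuming cn stands for pyrax.cloud_networks as in the module (ensure_network itself is illustrative):

import pyrax

def ensure_network(cn, label, cidr):
    try:
        return cn.find_network_by_label(label), False   # already present, no change
    except pyrax.exceptions.NetworkNotFound:
        return cn.create(label, cidr=cidr), True        # created, report changed=True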
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
deleted file mode 100644
index 46c942c7..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_queue.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_queue
-short_description: create / delete a queue in Rackspace Public Cloud
-description:
- - creates / deletes a Rackspace Public Cloud queue.
-options:
- name:
- type: str
- description:
- - Name to give the queue
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author:
- - "Christopher H. Laco (@claco)"
- - "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
-- name: Build a Queue
- gather_facts: False
- hosts: local
- connection: local
- tasks:
- - name: Queue create request
- local_action:
- module: rax_queue
- credentials: ~/.raxpub
- name: my-queue
- region: DFW
- state: present
- register: my_queue
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
-
-
-def cloud_queue(module, state, name):
- for arg_name, arg in (('state', state), ('name', name)):
- if not arg:
- module.fail_json(msg='%s is required for rax_queue' % arg_name)
-
- changed = False
- queues = []
- instance = {}
-
- cq = pyrax.queues
- if not cq:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- for queue in cq.list():
- if name != queue.name:
- continue
-
- queues.append(queue)
-
- if len(queues) > 1:
- module.fail_json(msg='Multiple Queues were matched by name')
-
- if state == 'present':
- if not queues:
- try:
- queue = cq.create(name)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
- else:
- queue = queues[0]
-
- instance = dict(name=queue.name)
- result = dict(changed=changed, queue=instance)
- module.exit_json(**result)
-
- elif state == 'absent':
- if queues:
- queue = queues[0]
- try:
- queue.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- module.exit_json(changed=changed, queue=instance)
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- name=dict(),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together()
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- name = module.params.get('name')
- state = module.params.get('state')
-
- setup_rax_module(module, pyrax)
-
- cloud_queue(module, state, name)
-
-
-if __name__ == '__main__':
- main()
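
Note on the module removed above: rax_queue filters cq.list() down to exact name matches and refuses to act when the name is ambiguous; present then reuses or creates, absent deletes if found. The uniqueness guard as a standalone sketch (find_unique and the ValueError are illustrative, not module behavior -- the module calls fail_json instead):

def find_unique(items, name):
    matches = [item for item in items if item.name == name]
    if len(matches) > 1:
        raise ValueError('multiple items named %r' % name)  # module: fail_json(...)
    return matches[0] if matches else None                  # None means "create it"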
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
deleted file mode 100644
index 4080e4c6..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_group.py
+++ /dev/null
@@ -1,441 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_scaling_group
-short_description: Manipulate Rackspace Cloud Autoscale Groups
-description:
- - Manipulate Rackspace Cloud Autoscale Groups
-options:
- config_drive:
- description:
- - Attach read-only configuration drive to server as label config-2
- type: bool
- default: 'no'
- cooldown:
- type: int
- description:
- - The period of time, in seconds, that must pass before any scaling can
- occur after the previous scaling. Must be an integer between 0 and
- 86400 (24 hrs).
- default: 300
- disk_config:
- type: str
- description:
- - Disk partitioning strategy
- - If not specified, it will fall back to C(auto).
- choices:
- - auto
- - manual
- files:
- type: dict
- description:
- - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
- flavor:
- type: str
- description:
- - flavor to use for the instance
- required: true
- image:
- type: str
- description:
- - image to use for the instance. Can be an C(id), C(human_id) or C(name)
- required: true
- key_name:
- type: str
- description:
- - key pair to use on the instance
- loadbalancers:
- type: list
- elements: dict
- description:
- - List of load balancer C(id) and C(port) hashes
- max_entities:
- type: int
- description:
- - The maximum number of entities that are allowed in the scaling group.
- Must be an integer between 0 and 1000.
- required: true
- meta:
- type: dict
- description:
- - A hash of metadata to associate with the instance
- min_entities:
- type: int
- description:
- - The minimum number of entities that are allowed in the scaling group.
- Must be an integer between 0 and 1000.
- required: true
- name:
- type: str
- description:
- - Name to give the scaling group
- required: true
- networks:
- type: list
- elements: str
- description:
- - The network to attach to the instances. If specified, you must include
- ALL networks including the public and private interfaces. Can be C(id)
- or C(label).
- default:
- - public
- - private
- server_name:
- type: str
- description:
- - The base name for servers created by Autoscale
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
- user_data:
- type: str
- description:
- - Data to be uploaded to the servers config drive. This option implies
- I(config_drive). Can be a file path or a string
- wait:
- description:
- - wait for the scaling group to finish provisioning the minimum amount of
- servers
- type: bool
- default: 'no'
- wait_timeout:
- type: int
- description:
- - how long before wait gives up, in seconds
- default: 300
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - community.general.rax_scaling_group:
- credentials: ~/.raxpub
- region: ORD
- cooldown: 300
- flavor: performance1-1
- image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
- min_entities: 5
- max_entities: 10
- name: ASG Test
- server_name: asgtest
- loadbalancers:
- - id: 228385
- port: 80
- register: asg
-'''
-
-import base64
-import json
-import os
-import time
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_find_image, rax_find_network,
- rax_required_together, rax_to_dict, setup_rax_module)
-from ansible.module_utils.six import string_types
-
-
-def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
- image=None, key_name=None, loadbalancers=None, meta=None,
- min_entities=0, max_entities=0, name=None, networks=None,
- server_name=None, state='present', user_data=None,
- config_drive=False, wait=True, wait_timeout=300):
- files = {} if files is None else files
- loadbalancers = [] if loadbalancers is None else loadbalancers
- meta = {} if meta is None else meta
- networks = [] if networks is None else networks
-
- changed = False
-
- au = pyrax.autoscale
- if not au:
- module.fail_json(msg='Failed to instantiate clients. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- if user_data:
- config_drive = True
-
- if user_data and os.path.isfile(user_data):
- try:
- f = open(user_data)
- user_data = f.read()
- f.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % user_data)
-
- if state == 'present':
- # Normalize and ensure all metadata values are strings
- if meta:
- for k, v in meta.items():
- if isinstance(v, list):
- meta[k] = ','.join(['%s' % i for i in v])
- elif isinstance(v, dict):
- meta[k] = json.dumps(v)
- elif not isinstance(v, string_types):
- meta[k] = '%s' % v
-
- if image:
- image = rax_find_image(module, pyrax, image)
-
- nics = []
- if networks:
- for network in networks:
- nics.extend(rax_find_network(module, pyrax, network))
-
- for nic in nics:
- # pyrax is currently returning net-id, but we need uuid
- # this check makes this forward compatible for a time when
- # pyrax uses uuid instead
- if nic.get('net-id'):
- nic.update(uuid=nic['net-id'])
- del nic['net-id']
-
- # Handle the file contents
- personality = []
- if files:
- for rpath in files.keys():
- lpath = os.path.expanduser(files[rpath])
- try:
- f = open(lpath, 'r')
- personality.append({
- 'path': rpath,
- 'contents': f.read()
- })
- f.close()
- except Exception as e:
- module.fail_json(msg='Failed to load %s' % lpath)
-
- lbs = []
- if loadbalancers:
- for lb in loadbalancers:
- try:
- lb_id = int(lb.get('id'))
- except (ValueError, TypeError):
- module.fail_json(msg='Load balancer ID is not an integer: '
- '%s' % lb.get('id'))
- try:
- port = int(lb.get('port'))
- except (ValueError, TypeError):
- module.fail_json(msg='Load balancer port is not an '
- 'integer: %s' % lb.get('port'))
- if not lb_id or not port:
- continue
- lbs.append((lb_id, port))
-
- try:
- sg = au.find(name=name)
- except pyrax.exceptions.NoUniqueMatch as e:
- module.fail_json(msg='%s' % e)
- except pyrax.exceptions.NotFound:
- try:
- sg = au.create(name, cooldown=cooldown,
- min_entities=min_entities,
- max_entities=max_entities,
- launch_config_type='launch_server',
- server_name=server_name, image=image,
- flavor=flavor, disk_config=disk_config,
- metadata=meta, personality=personality,
- networks=nics, load_balancers=lbs,
- key_name=key_name, config_drive=config_drive,
- user_data=user_data)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- if not changed:
- # Scaling Group Updates
- group_args = {}
- if cooldown != sg.cooldown:
- group_args['cooldown'] = cooldown
-
- if min_entities != sg.min_entities:
- group_args['min_entities'] = min_entities
-
- if max_entities != sg.max_entities:
- group_args['max_entities'] = max_entities
-
- if group_args:
- changed = True
- sg.update(**group_args)
-
- # Launch Configuration Updates
- lc = sg.get_launch_config()
- lc_args = {}
- if server_name != lc.get('name'):
- lc_args['server_name'] = server_name
-
- if image != lc.get('image'):
- lc_args['image'] = image
-
- if flavor != lc.get('flavor'):
- lc_args['flavor'] = flavor
-
- disk_config = disk_config or 'AUTO'
- if ((disk_config or lc.get('disk_config')) and
- disk_config != lc.get('disk_config', 'AUTO')):
- lc_args['disk_config'] = disk_config
-
- if (meta or lc.get('meta')) and meta != lc.get('metadata'):
- lc_args['metadata'] = meta
-
- test_personality = []
- for p in personality:
- test_personality.append({
- 'path': p['path'],
- 'contents': base64.b64encode(p['contents'].encode('utf-8')).decode('utf-8')
- })
- if ((test_personality or lc.get('personality')) and
- test_personality != lc.get('personality')):
- lc_args['personality'] = personality
-
- if nics != lc.get('networks'):
- lc_args['networks'] = nics
-
- if lbs != lc.get('load_balancers'):
- # Work around for https://github.com/rackspace/pyrax/pull/393
- lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
-
- if key_name != lc.get('key_name'):
- lc_args['key_name'] = key_name
-
- if config_drive != lc.get('config_drive', False):
- lc_args['config_drive'] = config_drive
-
- if (user_data and
- base64.b64encode(user_data.encode('utf-8')).decode('utf-8') != lc.get('user_data')):
- lc_args['user_data'] = user_data
-
- if lc_args:
- # Work around for https://github.com/rackspace/pyrax/pull/389
- if 'flavor' not in lc_args:
- lc_args['flavor'] = lc.get('flavor')
- changed = True
- sg.update_launch_config(**lc_args)
-
- sg.get()
-
- if wait:
- end_time = time.time() + wait_timeout
- infinite = wait_timeout == 0
- while infinite or time.time() < end_time:
- state = sg.get_state()
- if state["pending_capacity"] == 0:
- break
-
- time.sleep(5)
-
- module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
-
- else:
- try:
- sg = au.find(name=name)
- sg.delete()
- changed = True
- except pyrax.exceptions.NotFound as e:
- sg = {}
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- config_drive=dict(default=False, type='bool'),
- cooldown=dict(type='int', default=300),
- disk_config=dict(choices=['auto', 'manual']),
- files=dict(type='dict', default={}),
- flavor=dict(required=True),
- image=dict(required=True),
- key_name=dict(),
- loadbalancers=dict(type='list', elements='dict'),
- meta=dict(type='dict', default={}),
- min_entities=dict(type='int', required=True),
- max_entities=dict(type='int', required=True),
- name=dict(required=True),
- networks=dict(type='list', elements='str', default=['public', 'private']),
- server_name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- user_data=dict(no_log=True),
- wait=dict(default=False, type='bool'),
- wait_timeout=dict(default=300, type='int'),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- config_drive = module.params.get('config_drive')
- cooldown = module.params.get('cooldown')
- disk_config = module.params.get('disk_config')
- if disk_config:
- disk_config = disk_config.upper()
- files = module.params.get('files')
- flavor = module.params.get('flavor')
- image = module.params.get('image')
- key_name = module.params.get('key_name')
- loadbalancers = module.params.get('loadbalancers')
- meta = module.params.get('meta')
- min_entities = module.params.get('min_entities')
- max_entities = module.params.get('max_entities')
- name = module.params.get('name')
- networks = module.params.get('networks')
- server_name = module.params.get('server_name')
- state = module.params.get('state')
- user_data = module.params.get('user_data')
- wait = module.params.get('wait')
- wait_timeout = module.params.get('wait_timeout')
-
- if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
- module.fail_json(msg='min_entities and max_entities must be an '
- 'integer between 0 and 1000')
-
- if not 0 <= cooldown <= 86400:
- module.fail_json(msg='cooldown must be an integer between 0 and 86400')
-
- setup_rax_module(module, pyrax)
-
- rax_asg(module, cooldown=cooldown, disk_config=disk_config,
- files=files, flavor=flavor, image=image, meta=meta,
- key_name=key_name, loadbalancers=loadbalancers,
- min_entities=min_entities, max_entities=max_entities,
- name=name, networks=networks, server_name=server_name,
- state=state, config_drive=config_drive, user_data=user_data,
- wait=wait, wait_timeout=wait_timeout)
-
-
-if __name__ == '__main__':
- main()
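
Note on the module removed above: the update path in rax_scaling_group builds group_args and lc_args holding only the keys whose live value differs from the wished one, and an update call plus changed=True happen only when a dict is non-empty. The same delta pattern as a generic sketch (dict_delta is illustrative):

def dict_delta(current, wished):
    """Keys whose wished value differs from the current one; empty dict means no update call."""
    return {key: value for key, value in wished.items() if current.get(key) != value}

# e.g. dict_delta({'cooldown': 300, 'min_entities': 2}, {'cooldown': 600, 'min_entities': 2})
# returns {'cooldown': 600}; only then call sg.update(**delta) and report changed=True.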
diff --git a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
deleted file mode 100644
index be46bd62..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/rackspace/rax_scaling_policy.py
+++ /dev/null
@@ -1,287 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: rax_scaling_policy
-short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
-description:
- - Manipulate Rackspace Cloud Autoscale Scaling Policy
-options:
- at:
- type: str
- description:
- - The UTC time when this policy will be executed. The time must be
- formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
- C(2013-05-19T08:07:08Z)
- change:
- type: int
- description:
- - The change, either as a number of servers or as a percentage, to make
- in the scaling group. If this is a percentage, you must set
- I(is_percent) to C(true) also.
- cron:
- type: str
- description:
- - The time when the policy will be executed, as a cron entry. For
- example, if this parameter is set to C(1 0 * * *), the policy runs
- at one minute past midnight every day.
- cooldown:
- type: int
- description:
- - The period of time, in seconds, that must pass before any scaling can
- occur after the previous scaling. Must be an integer between 0 and
- 86400 (24 hrs).
- default: 300
- desired_capacity:
- type: int
- description:
- - The desired server capacity of the scaling group; that is, how
- many servers should be in the scaling group.
- is_percent:
- description:
- - Whether the value in I(change) is a percent value
- default: false
- type: bool
- name:
- type: str
- description:
- - Name to give the policy
- required: true
- policy_type:
- type: str
- description:
- - The type of policy that will be executed.
- choices:
- - webhook
- - schedule
- required: true
- scaling_group:
- type: str
- description:
- - Name of the scaling group that this policy will be added to
- required: true
- state:
- type: str
- description:
- - Indicate desired state of the resource
- choices:
- - present
- - absent
- default: present
-author: "Matt Martz (@sivel)"
-extends_documentation_fragment:
-- community.general.rackspace
-- community.general.rackspace.openstack
-
-'''
-
-EXAMPLES = '''
----
-- hosts: localhost
- gather_facts: false
- connection: local
- tasks:
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- at: '2013-05-19T08:07:08Z'
- change: 25
- cooldown: 300
- is_percent: true
- name: ASG Test Policy - at
- policy_type: schedule
- scaling_group: ASG Test
- register: asps_at
-
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- cron: '1 0 * * *'
- change: 25
- cooldown: 300
- is_percent: true
- name: ASG Test Policy - cron
- policy_type: schedule
- scaling_group: ASG Test
- register: asp_cron
-
- - community.general.rax_scaling_policy:
- credentials: ~/.raxpub
- region: ORD
- cooldown: 300
- desired_capacity: 5
- name: ASG Test Policy - webhook
- policy_type: webhook
- scaling_group: ASG Test
- register: asp_webhook
-'''
-
-try:
- import pyrax
- HAS_PYRAX = True
-except ImportError:
- HAS_PYRAX = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
- setup_rax_module)
-
-
-def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
- desired_capacity=0, is_percent=False, name=None,
- policy_type=None, scaling_group=None, state='present'):
- changed = False
-
- au = pyrax.autoscale
- if not au:
- module.fail_json(msg='Failed to instantiate client. This '
- 'typically indicates an invalid region or an '
- 'incorrectly capitalized region name.')
-
- try:
- UUID(scaling_group)
- except ValueError:
- try:
- sg = au.find(name=scaling_group)
- except Exception as e:
- module.fail_json(msg='%s' % e)
- else:
- try:
- sg = au.get(scaling_group)
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- if state == 'present':
- policies = [p for p in sg.list_policies() if p.name == name]
- if len(policies) > 1:
- module.fail_json(msg='No unique policy match found by name')
- if at:
- args = dict(at=at)
- elif cron:
- args = dict(cron=cron)
- else:
- args = None
-
- if not policies:
- try:
- policy = sg.add_policy(name, policy_type=policy_type,
- cooldown=cooldown, change=change,
- is_percent=is_percent,
- desired_capacity=desired_capacity,
- args=args)
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- else:
- policy = policies[0]
- kwargs = {}
- if policy_type != policy.type:
- kwargs['policy_type'] = policy_type
-
- if cooldown != policy.cooldown:
- kwargs['cooldown'] = cooldown
-
- if hasattr(policy, 'change') and change != policy.change:
- kwargs['change'] = change
-
- if hasattr(policy, 'changePercent') and is_percent is False:
- kwargs['change'] = change
- kwargs['is_percent'] = False
- elif hasattr(policy, 'change') and is_percent is True:
- kwargs['change'] = change
- kwargs['is_percent'] = True
-
- if hasattr(policy, 'desiredCapacity') and change:
- kwargs['change'] = change
- elif ((hasattr(policy, 'change') or
- hasattr(policy, 'changePercent')) and desired_capacity):
- kwargs['desired_capacity'] = desired_capacity
-
- if hasattr(policy, 'args') and args != policy.args:
- kwargs['args'] = args
-
- if kwargs:
- policy.update(**kwargs)
- changed = True
-
- policy.get()
-
- module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
- else:
- try:
- policies = [p for p in sg.list_policies() if p.name == name]
- if len(policies) > 1:
- module.fail_json(msg='No unique policy match found by name')
- elif not policies:
- policy = {}
- else:
- policy = policies[0]
- policy.delete()
- changed = True
- except Exception as e:
- module.fail_json(msg='%s' % e)
-
- module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
-
-
-def main():
- argument_spec = rax_argument_spec()
- argument_spec.update(
- dict(
- at=dict(),
- change=dict(type='int'),
- cron=dict(),
- cooldown=dict(type='int', default=300),
- desired_capacity=dict(type='int'),
- is_percent=dict(type='bool', default=False),
- name=dict(required=True),
- policy_type=dict(required=True, choices=['webhook', 'schedule']),
- scaling_group=dict(required=True),
- state=dict(default='present', choices=['present', 'absent']),
- )
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_together=rax_required_together(),
- mutually_exclusive=[
- ['cron', 'at'],
- ['change', 'desired_capacity'],
- ]
- )
-
- if not HAS_PYRAX:
- module.fail_json(msg='pyrax is required for this module')
-
- at = module.params.get('at')
- change = module.params.get('change')
- cron = module.params.get('cron')
- cooldown = module.params.get('cooldown')
- desired_capacity = module.params.get('desired_capacity')
- is_percent = module.params.get('is_percent')
- name = module.params.get('name')
- policy_type = module.params.get('policy_type')
- scaling_group = module.params.get('scaling_group')
- state = module.params.get('state')
-
- if (at or cron) and policy_type == 'webhook':
- module.fail_json(msg='policy_type=schedule is required for a '
- 'time-based policy')
-
- setup_rax_module(module, pyrax)
-
- rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
- desired_capacity=desired_capacity, is_percent=is_percent,
- name=name, policy_type=policy_type, scaling_group=scaling_group,
- state=state)
-
-
-if __name__ == '__main__':
- main()
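
A Python 3 pitfall worth flagging in the policy lookup above: filter() returns a lazy iterator under Python 3, so calling len() on it raises TypeError and a second pass yields nothing, which is why the lookups use a list comprehension instead. A quick illustration:

names = ['asp-a', 'asp-b', 'asp-a']

lazy = filter(lambda n: n == 'asp-a', names)
# len(lazy)  # TypeError under Python 3: object of type 'filter' has no len()

matches = [n for n in names if n == 'asp-a']  # materialized, safe to len() and index
assert len(matches) == 2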
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
deleted file mode 100644
index a195d7fb..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_compute.py
+++ /dev/null
@@ -1,693 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway Compute management module
-#
-# Copyright (C) 2018 Online SAS.
-# https://www.scaleway.com
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_compute
-short_description: Scaleway compute management module
-author: Remy Leone (@remyleone)
-description:
- - "This module manages compute instances on Scaleway."
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
-
- public_ip:
- type: str
- description:
- - Manage public IP on a Scaleway server.
- - Could be a Scaleway IP address UUID.
- - C(dynamic) means the IP is destroyed at the same time the host is destroyed.
- - C(absent) means no public IP at all.
- default: absent
-
- enable_ipv6:
- description:
- - Enable public IPv6 connectivity on the instance
- default: false
- type: bool
-
- image:
- type: str
- description:
- - Image identifier used to start the instance with
- required: true
-
- name:
- type: str
- description:
- - Name of the instance
-
- organization:
- type: str
- description:
- - Organization identifier.
- - Exactly one of I(project) and I(organization) must be specified.
-
- project:
- type: str
- description:
- - Project identifier.
- - Exactly one of I(project) and I(organization) must be specified.
- version_added: 4.3.0
-
- state:
- type: str
- description:
- - Indicate desired state of the instance.
- default: present
- choices:
- - present
- - absent
- - running
- - restarted
- - stopped
-
- tags:
- type: list
- elements: str
- description:
- - List of tags to apply to the instance (5 max)
- required: false
- default: []
-
- region:
- type: str
- description:
- - Scaleway compute zone
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-
- commercial_type:
- type: str
- description:
- - Commercial name of the compute node
- required: true
-
- wait:
- description:
- - Wait for the instance to reach its desired state before returning.
- type: bool
- default: 'no'
-
- wait_timeout:
- type: int
- description:
- - Time to wait for the server to reach the expected state
- required: false
- default: 300
-
- wait_sleep_time:
- type: int
- description:
- - Time to wait before every attempt to check the state of the server
- required: false
- default: 3
-
- security_group:
- type: str
- description:
- - Security group unique identifier
- - If no value provided, the default security group or current security group will be used
- required: false
-'''
-
-EXAMPLES = '''
-- name: Create a server
- community.general.scaleway_compute:
- name: foobar
- state: present
- image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
- project: 951df375-e094-4d26-97c1-ba548eeb9c42
- region: ams1
- commercial_type: VC1S
- tags:
- - test
- - www
-
-- name: Create a server attached to a security group
- community.general.scaleway_compute:
- name: foobar
- state: present
- image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
- project: 951df375-e094-4d26-97c1-ba548eeb9c42
- region: ams1
- commercial_type: VC1S
- security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
- tags:
- - test
- - www
-
-- name: Destroy it right after
- community.general.scaleway_compute:
- name: foobar
- state: absent
- image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
- project: 951df375-e094-4d26-97c1-ba548eeb9c42
- region: ams1
- commercial_type: VC1S
-'''
-
-RETURN = '''
-'''
-
-import datetime
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.parse import quote as urlquote
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-
-SCALEWAY_SERVER_STATES = (
- 'stopped',
- 'stopping',
- 'starting',
- 'running',
- 'locked'
-)
-
-SCALEWAY_TRANSITIONS_STATES = (
- "stopping",
- "starting",
- "pending"
-)
-
-
-def check_image_id(compute_api, image_id):
- response = compute_api.get(path="images/%s" % image_id)
-
- if not response.ok:
- msg = 'Error in getting image %s on %s : %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
- compute_api.module.fail_json(msg=msg)
-
-
-def fetch_state(compute_api, server):
- compute_api.module.debug("fetch_state of server: %s" % server["id"])
- response = compute_api.get(path="servers/%s" % server["id"])
-
- if response.status_code == 404:
- return "absent"
-
- if not response.ok:
- msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- try:
- compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
- return response.json["server"]["state"]
- except KeyError:
- compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
-
-
-def wait_to_complete_state_transition(compute_api, server, wait=None):
- if wait is None:
- wait = compute_api.module.params["wait"]
- if not wait:
- return
-
- wait_timeout = compute_api.module.params["wait_timeout"]
- wait_sleep_time = compute_api.module.params["wait_sleep_time"]
-
- start = datetime.datetime.utcnow()
- end = start + datetime.timedelta(seconds=wait_timeout)
- while datetime.datetime.utcnow() < end:
- compute_api.module.debug("We are going to wait for the server to finish its transition")
- if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
- compute_api.module.debug("It seems that the server is not in transition anymore.")
- compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
- break
- time.sleep(wait_sleep_time)
- else:
- compute_api.module.fail_json(msg="Server takes too long to finish its transition")
-
-
-def public_ip_payload(compute_api, public_ip):
- # We don't want a public ip
- if public_ip in ("absent",):
- return {"dynamic_ip_required": False}
-
- # IP is only attached to the instance and is released as soon as the instance terminates
- if public_ip in ("dynamic", "allocated"):
- return {"dynamic_ip_required": True}
-
- # We check that the IP we want to attach exists, if so its ID is returned
- response = compute_api.get("ips")
- if not response.ok:
- msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- ip_list = []
- try:
- ip_list = response.json["ips"]
- except KeyError:
- compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
-
- lookup = [ip["id"] for ip in ip_list]
- if public_ip in lookup:
- return {"public_ip": public_ip}
-
-
-def create_server(compute_api, server):
- compute_api.module.debug("Starting a create_server")
- target_server = None
- data = {"enable_ipv6": server["enable_ipv6"],
- "tags": server["tags"],
- "commercial_type": server["commercial_type"],
- "image": server["image"],
- "dynamic_ip_required": server["dynamic_ip_required"],
- "name": server["name"]
- }
-
- if server["project"]:
- data["project"] = server["project"]
-
- if server["organization"]:
- data["organization"] = server["organization"]
-
- if server["security_group"]:
- data["security_group"] = server["security_group"]
-
- response = compute_api.post(path="servers", data=data)
-
- if not response.ok:
- msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- try:
- target_server = response.json["server"]
- except KeyError:
- compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
-
- return target_server
-
-
-def restart_server(compute_api, server):
- return perform_action(compute_api=compute_api, server=server, action="reboot")
-
-
-def stop_server(compute_api, server):
- return perform_action(compute_api=compute_api, server=server, action="poweroff")
-
-
-def start_server(compute_api, server):
- return perform_action(compute_api=compute_api, server=server, action="poweron")
-
-
-def perform_action(compute_api, server, action):
- response = compute_api.post(path="servers/%s/action" % server["id"],
- data={"action": action})
- if not response.ok:
- msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=server)
-
- return response
-
-
-def remove_server(compute_api, server):
- compute_api.module.debug("Starting remove server strategy")
- response = compute_api.delete(path="servers/%s" % server["id"])
- if not response.ok:
- msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=server)
-
- return response
-
-
-def present_strategy(compute_api, wished_server):
- compute_api.module.debug("Starting present strategy")
- changed = False
- query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
-
- if not query_results:
- changed = True
- if compute_api.module.check_mode:
- return changed, {"status": "A server would be created."}
-
- target_server = create_server(compute_api=compute_api, server=wished_server)
- else:
- target_server = query_results[0]
-
- if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
- wished_server=wished_server):
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
-
- target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
-
- return changed, target_server
-
-
-def absent_strategy(compute_api, wished_server):
- compute_api.module.debug("Starting absent strategy")
- changed = False
- target_server = None
- query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
-
- if not query_results:
- return changed, {"status": "Server already absent."}
- else:
- target_server = query_results[0]
-
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s would be made absent." % target_server["id"]}
-
- # A server MUST be stopped to be deleted.
- while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
- response = stop_server(compute_api=compute_api, server=target_server)
-
- if not response.ok:
- err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
- response.json)
- compute_api.module.fail_json(msg=err_msg)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
-
- response = remove_server(compute_api=compute_api, server=target_server)
-
- if not response.ok:
- err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
- compute_api.module.fail_json(msg=err_msg)
-
- return changed, {"status": "Server %s deleted" % target_server["id"]}
-
-
-def running_strategy(compute_api, wished_server):
- compute_api.module.debug("Starting running strategy")
- changed = False
- query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
-
- if not query_results:
- changed = True
- if compute_api.module.check_mode:
- return changed, {"status": "A server would be created before being run."}
-
- target_server = create_server(compute_api=compute_api, server=wished_server)
- else:
- target_server = query_results[0]
-
- if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
- wished_server=wished_server):
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
-
- target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
-
- current_state = fetch_state(compute_api=compute_api, server=target_server)
- if current_state not in ("running", "starting"):
- compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
-
- response = start_server(compute_api=compute_api, server=target_server)
- if not response.ok:
- msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- return changed, target_server
-
-
-def stop_strategy(compute_api, wished_server):
- compute_api.module.debug("Starting stop strategy")
- query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
-
- changed = False
-
- if not query_results:
-
- if compute_api.module.check_mode:
- return changed, {"status": "A server would be created before being stopped."}
-
- target_server = create_server(compute_api=compute_api, server=wished_server)
- changed = True
- else:
- target_server = query_results[0]
-
- compute_api.module.debug("stop_strategy: Servers are found.")
-
- if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
- wished_server=wished_server):
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {
- "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
-
- target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
-
- current_state = fetch_state(compute_api=compute_api, server=target_server)
- if current_state not in ("stopped",):
- compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
-
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s would be stopped." % target_server["id"]}
-
- response = stop_server(compute_api=compute_api, server=target_server)
- compute_api.module.debug(response.json)
- compute_api.module.debug(response.ok)
-
- if not response.ok:
- msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- return changed, target_server
-
-
-def restart_strategy(compute_api, wished_server):
- compute_api.module.debug("Starting restart strategy")
- changed = False
- query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
-
- if not query_results:
- changed = True
- if compute_api.module.check_mode:
- return changed, {"status": "A server would be created before being rebooted."}
-
- target_server = create_server(compute_api=compute_api, server=wished_server)
- else:
- target_server = query_results[0]
-
- if server_attributes_should_be_changed(compute_api=compute_api,
- target_server=target_server,
- wished_server=wished_server):
- changed = True
-
- if compute_api.module.check_mode:
- return changed, {
- "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
-
- target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
-
- changed = True
- if compute_api.module.check_mode:
- return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
-
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
-
- if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
- response = restart_server(compute_api=compute_api, server=target_server)
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
- if not response.ok:
- msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
- response.json)
- compute_api.module.fail_json(msg=msg)
-
- if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
- response = restart_server(compute_api=compute_api, server=target_server)
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
- if not response.ok:
- msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
- response.json)
- compute_api.module.fail_json(msg=msg)
-
- return changed, target_server
-
-
-state_strategy = {
- "present": present_strategy,
- "restarted": restart_strategy,
- "stopped": stop_strategy,
- "running": running_strategy,
- "absent": absent_strategy
-}
-
-
-def find(compute_api, wished_server, per_page=1):
- compute_api.module.debug("Getting inside find")
- # Only the name attribute is accepted in the Compute query API
- response = compute_api.get("servers", params={"name": wished_server["name"],
- "per_page": per_page})
-
- if not response.ok:
- msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- search_results = response.json["servers"]
-
- return search_results
-
-
-PATCH_MUTABLE_SERVER_ATTRIBUTES = (
- "ipv6",
- "tags",
- "name",
- "dynamic_ip_required",
- "security_group",
-)
-
-
-def server_attributes_should_be_changed(compute_api, target_server, wished_server):
- compute_api.module.debug("Checking if server attributes should be changed")
- compute_api.module.debug("Current Server: %s" % target_server)
- compute_api.module.debug("Wished Server: %s" % wished_server)
- debug_dict = dict((x, (target_server[x], wished_server[x]))
- for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
- if x in target_server and x in wished_server)
- compute_api.module.debug("Debug dict %s" % debug_dict)
- try:
- for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
- if key in target_server and key in wished_server:
- # When working with a dict, only the ID matters, as we ask the user to put only the resource ID in the playbook
- if isinstance(target_server[key], dict) and wished_server[key] and "id" in target_server[key].keys(
- ) and target_server[key]["id"] != wished_server[key]:
- return True
- # For any other structure, simply compare the content of the two objects
- elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
- return True
- return False
- except AttributeError:
- compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
-
-
-def server_change_attributes(compute_api, target_server, wished_server):
- compute_api.module.debug("Starting patching server attributes")
- patch_payload = dict()
-
- for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
- if key in target_server and key in wished_server:
- # When working with a dict, only the ID matters, as we ask the user to put only the resource ID in the playbook
- if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
- # Setting all key to current value except ID
- key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
- # Setting ID to the user specified ID
- key_dict["id"] = wished_server[key]
- patch_payload[key] = key_dict
- elif not isinstance(target_server[key], dict):
- patch_payload[key] = wished_server[key]
-
- response = compute_api.patch(path="servers/%s" % target_server["id"],
- data=patch_payload)
- if not response.ok:
- msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
- compute_api.module.fail_json(msg=msg)
-
- try:
- target_server = response.json["server"]
- except KeyError:
- compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
-
- wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
-
- return target_server
-
-
-def core(module):
- region = module.params["region"]
- wished_server = {
- "state": module.params["state"],
- "image": module.params["image"],
- "name": module.params["name"],
- "commercial_type": module.params["commercial_type"],
- "enable_ipv6": module.params["enable_ipv6"],
- "tags": module.params["tags"],
- "organization": module.params["organization"],
- "project": module.params["project"],
- "security_group": module.params["security_group"]
- }
- module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
- compute_api = Scaleway(module=module)
-
- check_image_id(compute_api, wished_server["image"])
-
- # IP parameters of the wished server depends on the configuration
- ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
- wished_server.update(ip_payload)
-
- changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
- module.exit_json(changed=changed, msg=summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- image=dict(required=True),
- name=dict(),
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- commercial_type=dict(required=True),
- enable_ipv6=dict(default=False, type="bool"),
- public_ip=dict(default="absent"),
- state=dict(choices=list(state_strategy.keys()), default='present'),
- tags=dict(type="list", elements="str", default=[]),
- organization=dict(),
- project=dict(),
- wait=dict(type="bool", default=False),
- wait_timeout=dict(type="int", default=300),
- wait_sleep_time=dict(type="int", default=3),
- security_group=dict(),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[
- ('organization', 'project'),
- ],
- required_one_of=[
- ('organization', 'project'),
- ],
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
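
Note on the module removed above: scaleway_compute waits out API transitions by polling against a deadline, and the while/else in wait_to_complete_state_transition means the failure branch only fires when the loop was never broken. A minimal standalone sketch of the same shape (wait_until and fetch are illustrative; the module calls fail_json rather than raising):

import datetime
import time

def wait_until(fetch, stable_states, timeout=300, sleep=3):
    deadline = datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout)
    while datetime.datetime.utcnow() < deadline:
        state = fetch()
        if state in stable_states:
            break                 # skips the else clause below
        time.sleep(sleep)
    else:                         # only reached when the deadline expired
        raise TimeoutError('resource still in transition after %ss' % timeout)
    return state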
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
deleted file mode 100644
index 35f35f82..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_database_backup.py
+++ /dev/null
@@ -1,372 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway database backups management module
-#
-# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_database_backup
-short_description: Scaleway database backups management module
-version_added: 1.2.0
-author: Guillaume Rodriguez (@guillaume_ro_fr)
-description:
- - This module manages database backups on a Scaleway account U(https://developer.scaleway.com).
-extends_documentation_fragment:
- - community.general.scaleway
-options:
- state:
- description:
- - Indicate desired state of the database backup.
- - C(present) creates a backup.
- - C(absent) deletes the backup.
- - C(exported) creates a download link for the backup.
- - C(restored) restores the backup to a new database.
- type: str
- default: present
- choices:
- - present
- - absent
- - exported
- - restored
-
- region:
- description:
- - Scaleway region to use (for example C(fr-par)).
- type: str
- required: true
- choices:
- - fr-par
- - nl-ams
- - pl-waw
-
- id:
- description:
- - UUID used to identify the database backup.
- - Required for C(absent), C(exported) and C(restored) states.
- type: str
-
- name:
- description:
- - Name used to identify the database backup.
- - Required for C(present) state.
- - Ignored when C(state=absent), C(state=exported) or C(state=restored).
- type: str
- required: false
-
- database_name:
- description:
- - Name used to identify the database.
- - Required for C(present) and C(restored) states.
- - Ignored when C(state=absent) or C(state=exported).
- type: str
- required: false
-
- instance_id:
- description:
- - UUID of the instance associated to the database backup.
- - Required for C(present) and C(restored) states.
- - Ignored when C(state=absent) or C(state=exported).
- type: str
- required: false
-
- expires_at:
- description:
- - Expiration datetime of the database backup (ISO 8601 format).
- - Ignored when C(state=absent), C(state=exported) or C(state=restored).
- type: str
- required: false
-
- wait:
- description:
- - Wait for the instance to reach its desired state before returning.
- type: bool
- default: false
-
- wait_timeout:
- description:
- - Time to wait for the backup to reach the expected state.
- type: int
- required: false
- default: 300
-
- wait_sleep_time:
- description:
- - Time to wait before every attempt to check the state of the backup.
- type: int
- required: false
- default: 3
-'''
-
-EXAMPLES = '''
- - name: Create a backup
- community.general.scaleway_database_backup:
- name: 'my_backup'
- state: present
- region: 'fr-par'
- database_name: 'my-database'
- instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
-
- - name: Export a backup
- community.general.scaleway_database_backup:
- id: '6ef1125a-037e-494f-a911-6d9c49a51691'
- state: exported
- region: 'fr-par'
-
- - name: Restore a backup
- community.general.scaleway_database_backup:
- id: '6ef1125a-037e-494f-a911-6d9c49a51691'
- state: restored
- region: 'fr-par'
- database_name: 'my-new-database'
- instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
-
- - name: Remove a backup
- community.general.scaleway_database_backup:
- id: '6ef1125a-037e-494f-a911-6d9c49a51691'
- state: absent
- region: 'fr-par'
-'''
-
-RETURN = '''
-metadata:
- description: Backup metadata.
- returned: when C(state=present), C(state=exported) or C(state=restored)
- type: dict
- sample: {
- "metadata": {
- "created_at": "2020-08-06T12:42:05.631049Z",
- "database_name": "my-database",
- "download_url": null,
- "download_url_expires_at": null,
- "expires_at": null,
- "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
- "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
- "instance_name": "my-instance",
- "name": "backup_name",
- "region": "fr-par",
- "size": 600000,
- "status": "ready",
- "updated_at": "2020-08-06T12:42:10.581649Z"
- }
- }
-'''
-
-import datetime
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway,
- scaleway_argument_spec,
- SCALEWAY_REGIONS,
-)
-
-stable_states = (
- 'ready',
- 'deleting',
-)
-
-
-def wait_to_complete_state_transition(module, account_api, backup=None):
- wait_timeout = module.params['wait_timeout']
- wait_sleep_time = module.params['wait_sleep_time']
-
- if backup is None or backup['status'] in stable_states:
- return backup
-
- start = datetime.datetime.utcnow()
- end = start + datetime.timedelta(seconds=wait_timeout)
- while datetime.datetime.utcnow() < end:
- module.debug('We are going to wait for the backup to finish its transition')
-
- response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
- if not response.ok:
- module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
- break
- response_json = response.json
-
- if response_json['status'] in stable_states:
- module.debug('It seems that the backup is not in transition anymore.')
- module.debug('Backup in state: %s' % response_json['status'])
- return response_json
- time.sleep(wait_sleep_time)
- else:
- module.fail_json(msg='Backup takes too long to finish its transition')
-
-
-def present_strategy(module, account_api, backup):
- name = module.params['name']
- database_name = module.params['database_name']
- instance_id = module.params['instance_id']
- expiration_date = module.params['expires_at']
-
- if backup is not None:
- if (backup['name'] == name or name is None) and (
- backup['expires_at'] == expiration_date or expiration_date is None):
- wait_to_complete_state_transition(module, account_api, backup)
- module.exit_json(changed=False)
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- payload = {}
- if name is not None:
- payload['name'] = name
- if expiration_date is not None:
- payload['expires_at'] = expiration_date
-
- response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
- payload)
- if response.ok:
- result = wait_to_complete_state_transition(module, account_api, response.json)
- module.exit_json(changed=True, metadata=result)
-
- module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
- if expiration_date is not None:
- payload['expires_at'] = expiration_date
-
- response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
-
- if response.ok:
- result = wait_to_complete_state_transition(module, account_api, response.json)
- module.exit_json(changed=True, metadata=result)
-
- module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
-
-
-def absent_strategy(module, account_api, backup):
- if backup is None:
- module.exit_json(changed=False)
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
- if response.ok:
- result = wait_to_complete_state_transition(module, account_api, response.json)
- module.exit_json(changed=True, metadata=result)
-
- module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
-
-
-def exported_strategy(module, account_api, backup):
- if backup is None:
- module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
-
- if backup['download_url'] is not None:
- module.exit_json(changed=False, metadata=backup)
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- backup = wait_to_complete_state_transition(module, account_api, backup)
- response = account_api.post(
- '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
-
- if response.ok:
- result = wait_to_complete_state_transition(module, account_api, response.json)
- module.exit_json(changed=True, metadata=result)
-
- module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
-
-
-def restored_strategy(module, account_api, backup):
- if backup is None:
- module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
-
- database_name = module.params['database_name']
- instance_id = module.params['instance_id']
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- backup = wait_to_complete_state_transition(module, account_api, backup)
-
- payload = {'database_name': database_name, 'instance_id': instance_id}
- response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
- payload)
-
- if response.ok:
- result = wait_to_complete_state_transition(module, account_api, response.json)
- module.exit_json(changed=True, metadata=result)
-
- module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
-
-
-state_strategy = {
- 'present': present_strategy,
- 'absent': absent_strategy,
- 'exported': exported_strategy,
- 'restored': restored_strategy,
-}
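-
-# Note: state_strategy is a dispatch table; core() below picks the handler via
-# state_strategy[state](module, account_api, backup_by_id), so adding a state
-# is a one-line entry here instead of another if/elif branch.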
-
-
-def core(module):
- state = module.params['state']
- backup_id = module.params['id']
-
- account_api = Scaleway(module)
-
- if backup_id is None:
- backup_by_id = None
- else:
- response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
- status_code = response.status_code
- backup_json = response.json
- backup_by_id = None
- if status_code == 404:
- backup_by_id = None
- elif response.ok:
- backup_by_id = backup_json
- else:
- module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
-
- state_strategy[state](module, account_api, backup_by_id)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
- region=dict(required=True, choices=SCALEWAY_REGIONS),
- id=dict(),
- name=dict(type='str'),
- database_name=dict(required=False),
- instance_id=dict(required=False),
- expires_at=dict(),
- wait=dict(type='bool', default=False),
- wait_timeout=dict(type='int', default=300),
- wait_sleep_time=dict(type='int', default=3),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_together=[
- ['database_name', 'instance_id'],
- ],
- required_if=[
- ['state', 'present', ['name', 'database_name', 'instance_id']],
- ['state', 'absent', ['id']],
- ['state', 'exported', ['id']],
- ['state', 'restored', ['id', 'database_name', 'instance_id']],
- ],
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
deleted file mode 100644
index 98aa453f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_image_info.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_image_info
-short_description: Gather information about the Scaleway images available.
-description:
- - Gather information about the Scaleway images available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.scaleway
-
-options:
- region:
- type: str
- description:
- - Scaleway compute zone
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway images information
- community.general.scaleway_image_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_image_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_image_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_image_info": [
- {
- "arch": "x86_64",
- "creation_date": "2018-07-17T16:18:49.276456+00:00",
- "default_bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": false,
- "dtb": "",
- "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.9.93 rev1"
- },
- "extra_volumes": [],
- "from_server": null,
- "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
- "modification_date": "2018-07-17T16:42:06.319315+00:00",
- "name": "Debian Stretch",
- "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
- "public": true,
- "root_volume": {
- "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
- "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
- "size": 25000000000,
- "volume_type": "l_ssd"
- },
- "state": "available"
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
-
-
-class ScalewayImageInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewayImageInfo, self).__init__(module)
- self.name = 'images'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_image_info=ScalewayImageInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
deleted file mode 100644
index 7901aaad..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip.py
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway IP management module
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_ip
-short_description: Scaleway IP management module
-author: Remy Leone (@remyleone)
-description:
-    - This module manages IPs on a Scaleway account
-      U(https://developer.scaleway.com).
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
- state:
- type: str
- description:
- - Indicate desired state of the IP.
- default: present
- choices:
- - present
- - absent
-
- organization:
- type: str
- description:
- - Scaleway organization identifier
- required: true
-
- region:
- type: str
- description:
- - Scaleway region to use (for example par1).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-
- id:
- type: str
- description:
-      - ID of the Scaleway IP (UUID).
-
- server:
- type: str
- description:
-      - ID of the server you want to attach the IP to.
-      - To detach the IP, do not specify this option.
-
- reverse:
- type: str
- description:
-      - Reverse DNS to assign to the IP.
-'''
-
-EXAMPLES = '''
-- name: Create an IP
- community.general.scaleway_ip:
- organization: '{{ scw_org }}'
- state: present
- region: par1
- register: ip_creation_task
-
-- name: Make sure IP is deleted
- community.general.scaleway_ip:
- id: '{{ ip_creation_task.scaleway_ip.id }}'
- state: absent
- region: par1
-'''
-
-RETURN = '''
-data:
- description: This is only present when C(state=present)
- returned: when C(state=present)
- type: dict
- sample: {
- "ips": [
- {
- "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
- "reverse": null,
- "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
- "server": {
- "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
- "name": "ansible_tuto-1"
- },
- "address": "212.47.232.136"
- }
- ]
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-from ansible.module_utils.basic import AnsibleModule
-
-
-def ip_attributes_should_be_changed(api, target_ip, wished_ip):
- patch_payload = {}
-
- if target_ip["reverse"] != wished_ip["reverse"]:
- patch_payload["reverse"] = wished_ip["reverse"]
-
- # IP is assigned to a server
- if target_ip["server"] is None and wished_ip["server"]:
- patch_payload["server"] = wished_ip["server"]
-
-    # IP is unassigned from a server
- try:
- if target_ip["server"]["id"] and wished_ip["server"] is None:
- patch_payload["server"] = wished_ip["server"]
- except (TypeError, KeyError):
- pass
-
- # IP is migrated between 2 different servers
- try:
- if target_ip["server"]["id"] != wished_ip["server"]:
- patch_payload["server"] = wished_ip["server"]
- except (TypeError, KeyError):
- pass
-
- return patch_payload
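-
-# A quick sketch of the return value (hypothetical inputs, not from the
-# module):
-#     target_ip = {'reverse': None, 'server': None}
-#     wished_ip = {'reverse': 'rev.example.com', 'server': 'a-server-id'}
-# yields {'reverse': 'rev.example.com', 'server': 'a-server-id'}; the
-# try/except blocks swallow the lookups that fail while no server is attached.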
-
-
-def payload_from_wished_ip(wished_ip):
- return dict(
- (k, v)
- for k, v in wished_ip.items()
- if k != 'id' and v is not None
- )
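-
-# A quick sketch (hypothetical input): the comprehension drops the 'id' key
-# and any None values, so
-#     payload_from_wished_ip({'id': 'x', 'reverse': None, 'organization': 'o'})
-# returns {'organization': 'o'} and the POST body only carries fields that
-# were actually set.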
-
-
-def present_strategy(api, wished_ip):
- changed = False
-
- response = api.get('ips')
- if not response.ok:
- api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
- response.status_code, response.json['message']))
-
- ips_list = response.json["ips"]
- ip_lookup = dict((ip["id"], ip)
- for ip in ips_list)
-
- if wished_ip["id"] not in ip_lookup.keys():
- changed = True
- if api.module.check_mode:
- return changed, {"status": "An IP would be created."}
-
- # Create IP
- creation_response = api.post('/ips',
- data=payload_from_wished_ip(wished_ip))
-
- if not creation_response.ok:
- msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
- creation_response.json['message'],
- creation_response.json)
- api.module.fail_json(msg=msg)
- return changed, creation_response.json["ip"]
-
- target_ip = ip_lookup[wished_ip["id"]]
- patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
-
- if not patch_payload:
- return changed, target_ip
-
- changed = True
- if api.module.check_mode:
- return changed, {"status": "IP attributes would be changed."}
-
- ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
- data=patch_payload)
-
- if not ip_patch_response.ok:
- api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
- ip_patch_response.status_code, ip_patch_response.json['message']))
-
- return changed, ip_patch_response.json["ip"]
-
-
-def absent_strategy(api, wished_ip):
- response = api.get('ips')
- changed = False
-
- status_code = response.status_code
- ips_json = response.json
- ips_list = ips_json["ips"]
-
- if not response.ok:
- api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
- status_code, response.json['message']))
-
- ip_lookup = dict((ip["id"], ip)
- for ip in ips_list)
- if wished_ip["id"] not in ip_lookup.keys():
- return changed, {}
-
- changed = True
- if api.module.check_mode:
- return changed, {"status": "IP would be destroyed"}
-
- response = api.delete('/ips/' + wished_ip["id"])
- if not response.ok:
- api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
- response.status_code, response.json))
-
- return changed, response.json
-
-
-def core(module):
- wished_ip = {
- "organization": module.params['organization'],
- "reverse": module.params["reverse"],
- "id": module.params["id"],
- "server": module.params["server"]
- }
-
- region = module.params["region"]
- module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
- api = Scaleway(module=module)
- if module.params["state"] == "absent":
- changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
- else:
- changed, summary = present_strategy(api=api, wished_ip=wished_ip)
- module.exit_json(changed=changed, scaleway_ip=summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['absent', 'present']),
- organization=dict(required=True),
- server=dict(),
- reverse=dict(),
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- id=dict()
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
deleted file mode 100644
index 189ee1cf..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_ip_info.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_ip_info
-short_description: Gather information about the Scaleway IPs available.
-description:
-  - Gather information about the Scaleway IPs available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.scaleway
-
-options:
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway IPs information
- community.general.scaleway_ip_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_ip_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_ip_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_ip_info": [
- {
- "address": "163.172.170.243",
- "id": "ea081794-a581-8899-8451-386ddaf0a451",
- "organization": "3f709602-5e6c-4619-b80c-e324324324af",
- "reverse": null,
- "server": {
- "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
- "name": "scw-e0d158"
- }
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway,
- ScalewayException,
- scaleway_argument_spec,
- SCALEWAY_LOCATION,
-)
-
-
-class ScalewayIpInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewayIpInfo, self).__init__(module)
- self.name = 'ips'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_ip_info=ScalewayIpInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
deleted file mode 100644
index 2112ae44..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_lb.py
+++ /dev/null
@@ -1,358 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway Load-balancer management module
-#
-# Copyright (C) 2018 Online SAS.
-# https://www.scaleway.com
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_lb
-short_description: Scaleway load-balancer management module
-author: Remy Leone (@remyleone)
-description:
- - "This module manages load-balancers on Scaleway."
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
-
- name:
- type: str
- description:
- - Name of the load-balancer
- required: true
-
- description:
- type: str
- description:
- - Description of the load-balancer
- required: true
-
- organization_id:
- type: str
- description:
- - Organization identifier
- required: true
-
- state:
- type: str
- description:
-      - Indicate desired state of the load-balancer.
- default: present
- choices:
- - present
- - absent
-
- region:
- type: str
- description:
-      - Scaleway region.
- required: true
- choices:
- - nl-ams
- - fr-par
- - pl-waw
-
- tags:
- type: list
- elements: str
- description:
- - List of tags to apply to the load-balancer
-
- wait:
- description:
- - Wait for the load-balancer to reach its desired state before returning.
- type: bool
-    default: false
-
- wait_timeout:
- type: int
- description:
- - Time to wait for the load-balancer to reach the expected state
- required: false
- default: 300
-
- wait_sleep_time:
- type: int
- description:
- - Time to wait before every attempt to check the state of the load-balancer
- required: false
- default: 3
-'''
-
-EXAMPLES = '''
-- name: Create a load-balancer
- community.general.scaleway_lb:
- name: foobar
- state: present
- organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
- region: fr-par
- tags:
- - hello
-
-- name: Delete a load-balancer
- community.general.scaleway_lb:
- name: foobar
- state: absent
- organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
- region: fr-par
-'''
-
-RETURN = '''
-{
- "scaleway_lb": {
- "backend_count": 0,
- "frontend_count": 0,
- "description": "Description of my load-balancer",
- "id": "00000000-0000-0000-0000-000000000000",
- "instances": [
- {
- "id": "00000000-0000-0000-0000-000000000000",
- "ip_address": "10.0.0.1",
- "region": "fr-par",
- "status": "ready"
- },
- {
- "id": "00000000-0000-0000-0000-000000000000",
- "ip_address": "10.0.0.2",
- "region": "fr-par",
- "status": "ready"
- }
- ],
- "ip": [
- {
- "id": "00000000-0000-0000-0000-000000000000",
- "ip_address": "192.168.0.1",
- "lb_id": "00000000-0000-0000-0000-000000000000",
- "region": "fr-par",
- "organization_id": "00000000-0000-0000-0000-000000000000",
- "reverse": ""
- }
- ],
- "name": "lb_ansible_test",
- "organization_id": "00000000-0000-0000-0000-000000000000",
- "region": "fr-par",
- "status": "ready",
- "tags": [
- "first_tag",
- "second_tag"
- ]
- }
-}
-'''
-
-import datetime
-import time
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
-
-STABLE_STATES = (
- "ready",
- "absent"
-)
-
-MUTABLE_ATTRIBUTES = (
- "name",
- "description"
-)
-
-
-def payload_from_wished_lb(wished_lb):
- return {
- "organization_id": wished_lb["organization_id"],
- "name": wished_lb["name"],
- "tags": wished_lb["tags"],
- "description": wished_lb["description"]
- }
-
-
-def fetch_state(api, lb):
- api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
- response = api.get(path=api.api_path + "/%s" % lb["id"])
-
- if response.status_code == 404:
- return "absent"
-
- if not response.ok:
- msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
- api.module.fail_json(msg=msg)
-
- try:
- api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
- return response.json["status"]
- except KeyError:
- api.module.fail_json(msg="Could not fetch state in %s" % response.json)
-
-
-def wait_to_complete_state_transition(api, lb, force_wait=False):
- wait = api.module.params["wait"]
- if not (wait or force_wait):
- return
- wait_timeout = api.module.params["wait_timeout"]
- wait_sleep_time = api.module.params["wait_sleep_time"]
-
- start = datetime.datetime.utcnow()
- end = start + datetime.timedelta(seconds=wait_timeout)
- while datetime.datetime.utcnow() < end:
- api.module.debug("We are going to wait for the load-balancer to finish its transition")
- state = fetch_state(api, lb)
- if state in STABLE_STATES:
- api.module.debug("It seems that the load-balancer is not in transition anymore.")
- api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
- break
- time.sleep(wait_sleep_time)
- else:
- api.module.fail_json(msg="Server takes too long to finish its transition")
-
-
-def lb_attributes_should_be_changed(target_lb, wished_lb):
- diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
-
- if diff:
- return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
- else:
- return diff
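-
-# Note: if any attribute in MUTABLE_ATTRIBUTES differs, the full set is
-# returned rather than only the changed keys, since present_strategy updates
-# with PUT (which replaces the object); a changed description therefore
-# re-sends the unchanged name as well.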
-
-
-def present_strategy(api, wished_lb):
- changed = False
-
- response = api.get(path=api.api_path)
- if not response.ok:
- api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
- response.status_code, response.json['message']))
-
- lbs_list = response.json["lbs"]
- lb_lookup = dict((lb["name"], lb)
- for lb in lbs_list)
-
- if wished_lb["name"] not in lb_lookup.keys():
- changed = True
- if api.module.check_mode:
- return changed, {"status": "A load-balancer would be created."}
-
- # Create Load-balancer
- api.warn(payload_from_wished_lb(wished_lb))
- creation_response = api.post(path=api.api_path,
- data=payload_from_wished_lb(wished_lb))
-
- if not creation_response.ok:
- msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
- creation_response.json['message'],
- creation_response.json)
- api.module.fail_json(msg=msg)
-
- wait_to_complete_state_transition(api=api, lb=creation_response.json)
- response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
- return changed, response.json
-
- target_lb = lb_lookup[wished_lb["name"]]
- patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
- wished_lb=wished_lb)
-
- if not patch_payload:
- return changed, target_lb
-
- changed = True
- if api.module.check_mode:
- return changed, {"status": "Load-balancer attributes would be changed."}
-
- lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
- data=patch_payload)
-
- if not lb_patch_response.ok:
- api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
- lb_patch_response.status_code, lb_patch_response.json['message']))
-
- wait_to_complete_state_transition(api=api, lb=target_lb)
- return changed, lb_patch_response.json
-
-
-def absent_strategy(api, wished_lb):
- response = api.get(path=api.api_path)
- changed = False
-
- status_code = response.status_code
- lbs_json = response.json
- lbs_list = lbs_json["lbs"]
-
- if not response.ok:
- api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
- status_code, response.json['message']))
-
- lb_lookup = dict((lb["name"], lb)
- for lb in lbs_list)
- if wished_lb["name"] not in lb_lookup.keys():
- return changed, {}
-
- target_lb = lb_lookup[wished_lb["name"]]
- changed = True
- if api.module.check_mode:
- return changed, {"status": "Load-balancer would be destroyed"}
-
- wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
- response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
- if not response.ok:
- api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
- response.status_code, response.json))
-
- wait_to_complete_state_transition(api=api, lb=target_lb)
- return changed, response.json
-
-
-state_strategy = {
- "present": present_strategy,
- "absent": absent_strategy
-}
-
-
-def core(module):
- region = module.params["region"]
- wished_load_balancer = {
- "state": module.params["state"],
- "name": module.params["name"],
- "description": module.params["description"],
- "tags": module.params["tags"],
- "organization_id": module.params["organization_id"]
- }
- module.params['api_url'] = SCALEWAY_ENDPOINT
- api = Scaleway(module=module)
- api.api_path = "lb/v1/regions/%s/lbs" % region
-
- changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
- wished_lb=wished_load_balancer)
- module.exit_json(changed=changed, scaleway_lb=summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- name=dict(required=True),
- description=dict(required=True),
- region=dict(required=True, choices=SCALEWAY_REGIONS),
- state=dict(choices=list(state_strategy.keys()), default='present'),
- tags=dict(type="list", elements="str", default=[]),
- organization_id=dict(required=True),
- wait=dict(type="bool", default=False),
- wait_timeout=dict(type="int", default=300),
- wait_sleep_time=dict(type="int", default=3),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
deleted file mode 100644
index a09d1bb5..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_organization_info.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_organization_info
-short_description: Gather information about the Scaleway organizations available.
-description:
- - Gather information about the Scaleway organizations available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-options:
- api_url:
- description:
- - Scaleway API URL
- default: 'https://account.scaleway.com'
- aliases: ['base_url']
-extends_documentation_fragment:
-- community.general.scaleway
-
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway organizations information
- community.general.scaleway_organization_info:
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_organization_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_organization_info:
- description: Response from Scaleway API
- returned: success
- type: complex
- sample:
- "scaleway_organization_info": [
- {
- "address_city_name": "Paris",
- "address_country_code": "FR",
- "address_line1": "42 Rue de l'univers",
- "address_line2": null,
- "address_postal_code": "75042",
- "address_subdivision_code": "FR-75",
- "creation_date": "2018-08-06T13:43:28.508575+00:00",
- "currency": "EUR",
- "customer_class": "individual",
- "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
- "locale": "fr_FR",
- "modification_date": "2018-08-06T14:56:41.401685+00:00",
- "name": "James Bond",
- "support_id": "694324",
- "support_level": "basic",
- "support_pin": "9324",
- "users": [],
- "vat_number": null,
- "warnings": []
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway, ScalewayException, scaleway_argument_spec
-)
-
-
-class ScalewayOrganizationInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewayOrganizationInfo, self).__init__(module)
- self.name = 'organizations'
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py
deleted file mode 100644
index 996a3cce..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_private_network.py
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway VPC management module
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_private_network
-short_description: Scaleway private network management
-version_added: 4.5.0
-author: Pascal MANGIN (@pastral)
-description:
-    - This module manages private networks on a Scaleway account
-      (U(https://developer.scaleway.com)).
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
- state:
- type: str
- description:
- - Indicate desired state of the VPC.
- default: present
- choices:
- - present
- - absent
-
- project:
- type: str
- description:
- - Project identifier.
- required: true
-
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-
- name:
- type: str
- description:
- - Name of the VPC.
-
- tags:
- type: list
- elements: str
- description:
-      - List of tags to apply to the private network.
- default: []
-
-'''
-
-EXAMPLES = '''
-- name: Create a private network
-  community.general.scaleway_private_network:
- project: '{{ scw_project }}'
- name: 'vpc_one'
- state: present
- region: par1
- register: vpc_creation_task
-
-- name: Make sure private network with name 'foo' is deleted in region par1
-  community.general.scaleway_private_network:
- name: 'foo'
- state: absent
- region: par1
-'''
-
-RETURN = '''
-scaleway_private_network:
- description: Information on the VPC.
- returned: success when C(state=present)
- type: dict
- sample:
- {
- "created_at": "2022-01-15T11:11:12.676445Z",
- "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
- "name": "network",
- "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
- "tags": [
- "tag1",
- "tag2",
- "tag3",
- "tag4",
- "tag5"
- ],
- "updated_at": "2022-01-15T11:12:04.624837Z",
- "zone": "fr-par-2"
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-from ansible.module_utils.basic import AnsibleModule
-
-
-def get_private_network(api, name, page=1):
- page_size = 10
- response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size})
- if not response.ok:
- msg = "Error during get private network creation: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
- api.module.fail_json(msg=msg)
-
- if response.json['total_count'] == 0:
- return None
-
-    for private_network in response.json['private_networks']:
-        if private_network['name'] == name:
-            return private_network
-
- # search on next page if needed
- if (page * page_size) < response.json['total_count']:
- return get_private_network(api, name, page + 1)
-
- return None
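-
-# A quick sketch of the pagination above (hypothetical numbers): with
-# page_size = 10 and total_count = 25, a miss on page 1 recurses into page 2
-# and then page 3; recursion stops once page * page_size >= total_count and
-# the function returns None when no private network of that name exists.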
-
-
-def present_strategy(api, wished_private_network):
-
- changed = False
- private_network = get_private_network(api, wished_private_network['name'])
- if private_network is not None:
- if set(wished_private_network['tags']) == set(private_network['tags']):
- return changed, private_network
- else:
-            # private network needs to be updated
- data = {'name': wished_private_network['name'],
- 'tags': wished_private_network['tags']
- }
- changed = True
- if api.module.check_mode:
- return changed, {"status": "private network would be updated"}
-
- response = api.patch(path='private-networks/' + private_network['id'], data=data)
- if not response.ok:
- api.module.fail_json(msg='Error updating private network [{0}: {1}]'.format(response.status_code, response.json))
-
- return changed, response.json
-
-    # private network needs to be created
- changed = True
- if api.module.check_mode:
- return changed, {"status": "private network would be created"}
-
- data = {'name': wished_private_network['name'],
- 'project_id': wished_private_network['project'],
- 'tags': wished_private_network['tags']
- }
-
- response = api.post(path='private-networks/', data=data)
-
- if not response.ok:
- api.module.fail_json(msg='Error creating private network [{0}: {1}]'.format(response.status_code, response.json))
-
- return changed, response.json
-
-
-def absent_strategy(api, wished_private_network):
-
- changed = False
- private_network = get_private_network(api, wished_private_network['name'])
- if private_network is None:
- return changed, {}
-
- changed = True
- if api.module.check_mode:
- return changed, {"status": "private network would be destroyed"}
-
- response = api.delete('private-networks/' + private_network['id'])
-
- if not response.ok:
- api.module.fail_json(msg='Error deleting private network [{0}: {1}]'.format(
- response.status_code, response.json))
-
- return changed, response.json
-
-
-def core(module):
-
- wished_private_network = {
- "project": module.params['project'],
- "tags": module.params['tags'],
- "name": module.params['name']
- }
-
- region = module.params["region"]
- module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"]
-
- api = Scaleway(module=module)
- if module.params["state"] == "absent":
- changed, summary = absent_strategy(api=api, wished_private_network=wished_private_network)
- else:
- changed, summary = present_strategy(api=api, wished_private_network=wished_private_network)
- module.exit_json(changed=changed, scaleway_private_network=summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['absent', 'present']),
- project=dict(required=True),
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- tags=dict(type="list", elements="str", default=[]),
- name=dict()
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
deleted file mode 100644
index f9faee61..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group.py
+++ /dev/null
@@ -1,239 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway Security Group management module
-#
-# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_security_group
-short_description: Scaleway Security Group management module
-author: Antoine Barbare (@abarbare)
-description:
-    - This module manages Security Groups on a Scaleway account
-      U(https://developer.scaleway.com).
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
- state:
- description:
- - Indicate desired state of the Security Group.
- type: str
- choices: [ absent, present ]
- default: present
-
- organization:
- description:
- - Organization identifier.
- type: str
- required: true
-
- region:
- description:
- - Scaleway region to use (for example C(par1)).
- type: str
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-
- name:
- description:
- - Name of the Security Group.
- type: str
- required: true
-
- description:
- description:
- - Description of the Security Group.
- type: str
-
- stateful:
- description:
- - Create a stateful security group which allows established connections in and out.
- type: bool
- required: true
-
- inbound_default_policy:
- description:
- - Default policy for incoming traffic.
- type: str
- choices: [ accept, drop ]
-
- outbound_default_policy:
- description:
-            - Default policy for outgoing traffic.
- type: str
- choices: [ accept, drop ]
-
- organization_default:
- description:
- - Create security group to be the default one.
- type: bool
-'''
-
-EXAMPLES = '''
-- name: Create a Security Group
- community.general.scaleway_security_group:
- state: present
- region: par1
- name: security_group
- description: "my security group description"
- organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
- stateful: false
- inbound_default_policy: accept
- outbound_default_policy: accept
- organization_default: false
- register: security_group_creation_task
-'''
-
-RETURN = '''
-data:
- description: This is only present when C(state=present)
- returned: when C(state=present)
- type: dict
- sample: {
- "scaleway_security_group": {
- "description": "my security group description",
- "enable_default_security": true,
- "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
- "inbound_default_policy": "accept",
- "name": "security_group",
- "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
- "organization_default": false,
- "outbound_default_policy": "accept",
- "servers": [],
- "stateful": false
- }
- }
-'''
-
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-from ansible.module_utils.basic import AnsibleModule
-from uuid import uuid4
-
-
-def payload_from_security_group(security_group):
- return dict(
- (k, v)
- for k, v in security_group.items()
- if k != 'id' and v is not None
- )
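-
-# Note: same filtering idiom as payload_from_wished_ip in scaleway_ip.py
-# above: drop the 'id' key and unset (None) fields before POSTing the
-# security group.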
-
-
-def present_strategy(api, security_group):
- ret = {'changed': False}
-
- response = api.get('security_groups')
- if not response.ok:
- api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
-
- security_group_lookup = dict((sg['name'], sg)
- for sg in response.json['security_groups'])
-
- if security_group['name'] not in security_group_lookup.keys():
- ret['changed'] = True
- if api.module.check_mode:
- # Help user when check mode is enabled by defining id key
- ret['scaleway_security_group'] = {'id': str(uuid4())}
- return ret
-
- # Create Security Group
- response = api.post('/security_groups',
- data=payload_from_security_group(security_group))
-
- if not response.ok:
- msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
- api.module.fail_json(msg=msg)
- ret['scaleway_security_group'] = response.json['security_group']
-
- else:
- ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
-
- return ret
-
-
-def absent_strategy(api, security_group):
- response = api.get('security_groups')
- ret = {'changed': False}
-
- if not response.ok:
- api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
-
- security_group_lookup = dict((sg['name'], sg)
- for sg in response.json['security_groups'])
- if security_group['name'] not in security_group_lookup.keys():
- return ret
-
- ret['changed'] = True
- if api.module.check_mode:
- return ret
-
- response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
- if not response.ok:
- api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
-
- return ret
-
-
-def core(module):
- security_group = {
- 'organization': module.params['organization'],
- 'name': module.params['name'],
- 'description': module.params['description'],
- 'stateful': module.params['stateful'],
- 'inbound_default_policy': module.params['inbound_default_policy'],
- 'outbound_default_policy': module.params['outbound_default_policy'],
- 'organization_default': module.params['organization_default'],
- }
-
- region = module.params['region']
- module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
-
- api = Scaleway(module=module)
- if module.params['state'] == 'present':
- summary = present_strategy(api=api, security_group=security_group)
- else:
- summary = absent_strategy(api=api, security_group=security_group)
- module.exit_json(**summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(type='str', default='present', choices=['absent', 'present']),
- organization=dict(type='str', required=True),
- name=dict(type='str', required=True),
- description=dict(type='str'),
- region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
- stateful=dict(type='bool', required=True),
- inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
- outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
- organization_default=dict(type='bool'),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
deleted file mode 100644
index a15044e6..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_info.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_security_group_info
-short_description: Gather information about the Scaleway security groups available.
-description:
- - Gather information about the Scaleway security groups available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-options:
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-extends_documentation_fragment:
-- community.general.scaleway
-
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway security groups information
- community.general.scaleway_security_group_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_security_group_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_security_group_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_security_group_info": [
- {
- "description": "test-ams",
- "enable_default_security": true,
- "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
- "name": "test-ams",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "organization_default": false,
- "servers": [
- {
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "name": "scw-e0d158"
- }
- ]
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway,
- ScalewayException,
- scaleway_argument_spec,
- SCALEWAY_LOCATION,
-)
-
-
-class ScalewaySecurityGroupInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewaySecurityGroupInfo, self).__init__(module)
- self.name = 'security_groups'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
deleted file mode 100644
index 9f959212..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_security_group_rule.py
+++ /dev/null
@@ -1,276 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway Security Group Rule management module
-#
-# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
-#
-# GNU General Public License v3.0+ (see COPYING or
-# https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_security_group_rule
-short_description: Scaleway Security Group Rule management module
-author: Antoine Barbare (@abarbare)
-description:
-    - This module manages Security Group Rules on a Scaleway account
-      U(https://developer.scaleway.com).
-extends_documentation_fragment:
- - community.general.scaleway
-requirements:
- - ipaddress
-
-options:
- state:
- type: str
- description:
- - Indicate desired state of the Security Group Rule.
- default: present
- choices:
- - present
- - absent
-
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-
- protocol:
- type: str
- description:
- - Network protocol to use
- choices:
- - TCP
- - UDP
- - ICMP
- required: true
-
- port:
- description:
-            - Port related to the rule; use a null value to match all ports.
- required: true
- type: int
-
- ip_range:
- type: str
- description:
-            - IPv4 CIDR notation to apply to the rule.
- default: 0.0.0.0/0
-
- direction:
- type: str
- description:
- - Rule direction
- choices:
- - inbound
- - outbound
- required: true
-
- action:
- type: str
- description:
- - Rule action
- choices:
- - accept
- - drop
- required: true
-
- security_group:
- type: str
- description:
- - Security Group unique identifier
- required: true
-'''
-
-EXAMPLES = '''
- - name: Create a Security Group Rule
- community.general.scaleway_security_group_rule:
- state: present
- region: par1
- protocol: TCP
- port: 80
- ip_range: 0.0.0.0/0
- direction: inbound
- action: accept
- security_group: b57210ee-1281-4820-a6db-329f78596ecb
- register: security_group_rule_creation_task
-'''
-
-RETURN = '''
-data:
- description: This is only present when C(state=present)
- returned: when C(state=present)
- type: dict
- sample: {
- "scaleway_security_group_rule": {
- "direction": "inbound",
- "protocol": "TCP",
- "ip_range": "0.0.0.0/0",
- "dest_port_from": 80,
- "action": "accept",
- "position": 2,
- "dest_port_to": null,
- "editable": null,
- "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
- }
- }
-'''
-
-import traceback
-
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
-from ansible.module_utils.common.text.converters import to_text
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-try:
- from ipaddress import ip_network
-except ImportError:
- IPADDRESS_IMP_ERR = traceback.format_exc()
- HAS_IPADDRESS = False
-else:
- HAS_IPADDRESS = True
-
-
-def get_sgr_from_api(security_group_rules, security_group_rule):
- """ Check if a security_group_rule specs are present in security_group_rules
- Return None if no rules match the specs
- Return the rule if found
- """
- for sgr in security_group_rules:
- if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
- sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
- sgr['protocol'] == security_group_rule['protocol']):
- return sgr
-
- return None
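-
-# Note: rules are matched on (ip_range, dest_port_from, direction, action,
-# protocol); 'id' and 'position' are ignored, so re-running the module with an
-# identical rule spec is a no-op (changed=False).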
-
-
-def present_strategy(api, security_group_id, security_group_rule):
- ret = {'changed': False}
-
- response = api.get('security_groups/%s/rules' % security_group_id)
- if not response.ok:
- api.module.fail_json(
- msg='Error getting security group rules "%s": "%s" (%s)' %
- (response.info['msg'], response.json['message'], response.json))
-
- existing_rule = get_sgr_from_api(
- response.json['rules'], security_group_rule)
-
- if not existing_rule:
- ret['changed'] = True
- if api.module.check_mode:
- return ret
-
- # Create Security Group Rule
- response = api.post('/security_groups/%s/rules' % security_group_id,
- data=payload_from_object(security_group_rule))
-
- if not response.ok:
- api.module.fail_json(
- msg='Error during security group rule creation: "%s": "%s" (%s)' %
- (response.info['msg'], response.json['message'], response.json))
- ret['scaleway_security_group_rule'] = response.json['rule']
-
- else:
- ret['scaleway_security_group_rule'] = existing_rule
-
- return ret
-
-
-def absent_strategy(api, security_group_id, security_group_rule):
- ret = {'changed': False}
-
- response = api.get('security_groups/%s/rules' % security_group_id)
- if not response.ok:
- api.module.fail_json(
- msg='Error getting security group rules "%s": "%s" (%s)' %
- (response.info['msg'], response.json['message'], response.json))
-
- existing_rule = get_sgr_from_api(
- response.json['rules'], security_group_rule)
-
- if not existing_rule:
- return ret
-
- ret['changed'] = True
- if api.module.check_mode:
- return ret
-
- response = api.delete(
- '/security_groups/%s/rules/%s' %
- (security_group_id, existing_rule['id']))
- if not response.ok:
- api.module.fail_json(
- msg='Error deleting security group rule "%s": "%s" (%s)' %
- (response.info['msg'], response.json['message'], response.json))
-
- return ret
-
-
-def core(module):
- api = Scaleway(module=module)
-
- security_group_rule = {
- 'protocol': module.params['protocol'],
- 'dest_port_from': module.params['port'],
- 'ip_range': module.params['ip_range'],
- 'direction': module.params['direction'],
- 'action': module.params['action'],
- }
-
- region = module.params['region']
- module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
-
- if module.params['state'] == 'present':
- summary = present_strategy(
- api=api,
- security_group_id=module.params['security_group'],
- security_group_rule=security_group_rule)
- else:
- summary = absent_strategy(
- api=api,
- security_group_id=module.params['security_group'],
- security_group_rule=security_group_rule)
- module.exit_json(**summary)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(
- state=dict(type='str', default='present', choices=['absent', 'present']),
- region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
- protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
- port=dict(type='int', required=True),
- ip_range=dict(type='str', default='0.0.0.0/0'),
- direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
- action=dict(type='str', required=True, choices=['accept', 'drop']),
- security_group=dict(type='str', required=True),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
- if not HAS_IPADDRESS:
- module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
deleted file mode 100644
index 2b9d91b4..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_server_info.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_server_info
-short_description: Gather information about the Scaleway servers available.
-description:
- - Gather information about the Scaleway servers available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.scaleway
-
-options:
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway servers information
- community.general.scaleway_server_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_server_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_server_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_server_info": [
- {
- "arch": "x86_64",
- "boot_type": "local",
- "bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": true,
- "dtb": "",
- "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.4.127 rev1"
- },
- "commercial_type": "START1-XS",
- "creation_date": "2018-08-14T21:36:56.271545+00:00",
- "dynamic_ip_required": false,
- "enable_ipv6": false,
- "extra_networks": [],
- "hostname": "scw-e0d256",
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "image": {
- "arch": "x86_64",
- "creation_date": "2018-04-26T12:42:21.619844+00:00",
- "default_bootscript": {
- "architecture": "x86_64",
- "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
- "default": true,
- "dtb": "",
- "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
- "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
- "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
- "organization": "11111111-1111-4111-8111-111111111111",
- "public": true,
- "title": "x86_64 mainline 4.4.127 rev1"
- },
- "extra_volumes": [],
- "from_server": null,
- "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
- "modification_date": "2018-04-26T12:49:07.573004+00:00",
- "name": "Ubuntu Xenial",
- "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
- "public": true,
- "root_volume": {
- "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
- "size": 25000000000,
- "volume_type": "l_ssd"
- },
- "state": "available"
- },
- "ipv6": null,
- "location": {
- "cluster_id": "5",
- "hypervisor_id": "412",
- "node_id": "2",
- "platform_id": "13",
- "zone_id": "par1"
- },
- "maintenances": [],
- "modification_date": "2018-08-14T21:37:28.630882+00:00",
- "name": "scw-e0d256",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "private_ip": "10.14.222.131",
- "protected": false,
- "public_ip": {
- "address": "163.172.170.197",
- "dynamic": false,
- "id": "ea081794-a581-4495-8451-386ddaf0a451"
- },
- "security_group": {
- "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
- "name": "Default security group"
- },
- "state": "running",
- "state_detail": "booted",
- "tags": [],
- "volumes": {
- "0": {
- "creation_date": "2018-08-14T21:36:56.271545+00:00",
- "export_uri": "device://dev/vda",
- "id": "68386fae-4f55-4fbf-aabb-953036a85872",
- "modification_date": "2018-08-14T21:36:56.271545+00:00",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "server": {
- "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
- "name": "scw-e0d256"
- },
- "size": 25000000000,
- "state": "available",
- "volume_type": "l_ssd"
- }
- }
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway,
- ScalewayException,
- scaleway_argument_spec,
- SCALEWAY_LOCATION,
-)
-
-
-class ScalewayServerInfo(Scaleway):
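-    # Thin wrapper: this subclass only selects the 'servers' listing
-    # endpoint and pins the API URL to the requested region; main() then
-    # calls get_resources() from the Scaleway base class.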
-
- def __init__(self, module):
- super(ScalewayServerInfo, self).__init__(module)
- self.name = 'servers'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_server_info=ScalewayServerInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
deleted file mode 100644
index 8e1d2a61..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_snapshot_info.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_snapshot_info
-short_description: Gather information about the Scaleway snapshots available.
-description:
-  - Gather information about the Scaleway snapshots available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.scaleway
-
-options:
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway snapshots information
- community.general.scaleway_snapshot_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_snapshot_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_snapshot_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_snapshot_info": [
- {
- "base_volume": {
- "id": "68386fae-4f55-4fbf-aabb-953036a85872",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
- },
- "creation_date": "2018-08-14T22:34:35.299461+00:00",
- "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
- "modification_date": "2018-08-14T22:34:54.520560+00:00",
- "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "size": 25000000000,
- "state": "available",
- "volume_type": "l_ssd"
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway,
- ScalewayException,
- scaleway_argument_spec,
- SCALEWAY_LOCATION
-)
-
-
-class ScalewaySnapshotInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewaySnapshotInfo, self).__init__(module)
- self.name = 'snapshots'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
deleted file mode 100644
index 4c559092..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_sshkey.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway SSH keys management module
-#
-# Copyright (C) 2018 Online SAS.
-# https://www.scaleway.com
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_sshkey
-short_description: Scaleway SSH keys management module
-author: Remy Leone (@remyleone)
-description:
-  - This module manages SSH keys on a Scaleway account
- U(https://developer.scaleway.com)
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
- state:
- type: str
- description:
- - Indicate desired state of the SSH key.
- default: present
- choices:
- - present
- - absent
- ssh_pub_key:
- type: str
- description:
- - The public SSH key as a string to add.
- required: true
- api_url:
- type: str
- description:
- - Scaleway API URL
- default: 'https://account.scaleway.com'
- aliases: ['base_url']
-'''
-
-EXAMPLES = '''
-- name: "Add SSH key"
- community.general.scaleway_sshkey:
- ssh_pub_key: "ssh-rsa AAAA..."
- state: "present"
-
-- name: "Delete SSH key"
- community.general.scaleway_sshkey:
- ssh_pub_key: "ssh-rsa AAAA..."
- state: "absent"
-
-- name: "Add SSH key with explicit token"
- community.general.scaleway_sshkey:
- ssh_pub_key: "ssh-rsa AAAA..."
- state: "present"
- oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
-'''
-
-RETURN = '''
-data:
- description: This is only present when C(state=present)
- returned: when C(state=present)
- type: dict
- sample: {
- "ssh_public_keys": [
- {"key": "ssh-rsa AAAA...."}
- ]
- }
-'''
-
-from ansible.module_utils.basic import AnsibleModule, env_fallback
-from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
-
-
-def extract_present_sshkeys(raw_organization_dict):
- ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
- ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
- return ssh_key_lookup
-
-
-def extract_user_id(raw_organization_dict):
- return raw_organization_dict["organizations"][0]["users"][0]["id"]
-
-
-def sshkey_user_patch(ssh_lookup):
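-    # Build the full replacement payload for the user PATCH call: the whole
-    # key list is sent on every update (not a delta), so callers pass every
-    # key that should remain afterwards.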
- ssh_list = {"ssh_public_keys": [{"key": key}
- for key in ssh_lookup]}
- return ssh_list
-
-
-def core(module):
- ssh_pub_key = module.params['ssh_pub_key']
- state = module.params["state"]
- account_api = Scaleway(module)
- response = account_api.get('organizations')
-
- status_code = response.status_code
- organization_json = response.json
-
- if not response.ok:
- module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
- status_code, response.json['message']))
-
- user_id = extract_user_id(organization_json)
- present_sshkeys = []
- try:
- present_sshkeys = extract_present_sshkeys(organization_json)
-    except (KeyError, IndexError):
-        module.fail_json(msg="Error while extracting present SSH keys from API")
-
- if state in ('present',):
- if ssh_pub_key in present_sshkeys:
- module.exit_json(changed=False)
-
-        # If the key was not found, create it.
- if module.check_mode:
- module.exit_json(changed=True)
-
- present_sshkeys.append(ssh_pub_key)
- payload = sshkey_user_patch(present_sshkeys)
-
- response = account_api.patch('/users/%s' % user_id, data=payload)
-
- if response.ok:
- module.exit_json(changed=True, data=response.json)
-
- module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
- response.status_code, response.json))
-
- elif state in ('absent',):
- if ssh_pub_key not in present_sshkeys:
- module.exit_json(changed=False)
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- present_sshkeys.remove(ssh_pub_key)
- payload = sshkey_user_patch(present_sshkeys)
-
- response = account_api.patch('/users/%s' % user_id, data=payload)
-
- if response.ok:
- module.exit_json(changed=True, data=response.json)
-
- module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
- response.status_code, response.json))
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['absent', 'present']),
- ssh_pub_key=dict(required=True),
- api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
deleted file mode 100644
index 2848ec2c..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_user_data.py
+++ /dev/null
@@ -1,172 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway user data management module
-#
-# Copyright (C) 2018 Online SAS.
-# https://www.scaleway.com
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_user_data
-short_description: Scaleway user_data management module
-author: Remy Leone (@remyleone)
-description:
- - "This module manages user_data on compute instances on Scaleway."
-  - "It can be used, for example, to configure cloud-init."
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
-
- server_id:
- type: str
- description:
- - Scaleway Compute instance ID of the server
- required: true
-
- user_data:
- type: dict
- description:
-      - User-defined data. Typically used with C(cloud-init).
-      - Pass your cloud-init script here as a string.
- required: false
-
- region:
- type: str
- description:
- - Scaleway compute zone
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = '''
-- name: Update the cloud-init
- community.general.scaleway_user_data:
- server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
- region: ams1
- user_data:
- cloud-init: 'final_message: "Hello World!"'
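-
-# A sketch based on the module logic below: keys present on the server but
-# missing from I(user_data) are deleted, so an empty dict clears all user_data.
-- name: Clear all user_data
-  community.general.scaleway_user_data:
-    server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
-    region: ams1
-    user_data: {}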
-'''
-
-RETURN = '''
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-
-
-def patch_user_data(compute_api, server_id, key, value):
- compute_api.module.debug("Starting patching user_data attributes")
-
- path = "servers/%s/user_data/%s" % (server_id, key)
- response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
- if not response.ok:
- msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
- compute_api.module.fail_json(msg=msg)
-
- return response
-
-
-def delete_user_data(compute_api, server_id, key):
- compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
-
- response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
-
- if not response.ok:
-        msg = 'Error during user_data deleting: (%s) %s' % (response.status_code, response.body)
- compute_api.module.fail_json(msg=msg)
-
- return response
-
-
-def get_user_data(compute_api, server_id, key):
-    compute_api.module.debug("Starting fetching user_data attributes")
-
- path = "servers/%s/user_data/%s" % (server_id, key)
- response = compute_api.get(path=path)
- if not response.ok:
-        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
- compute_api.module.fail_json(msg=msg)
-
- return response.json
-
-
-def core(module):
- region = module.params["region"]
- server_id = module.params["server_id"]
- user_data = module.params["user_data"]
- changed = False
-
- module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
- compute_api = Scaleway(module=module)
-
- user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
- if not user_data_list.ok:
-        msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
- compute_api.module.fail_json(msg=msg)
-
- present_user_data_keys = user_data_list.json["user_data"]
- present_user_data = dict(
- (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
- for key in present_user_data_keys
- )
-
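-    # Idempotency check: stop without changes when the server-side user_data
-    # already matches the requested dict exactly.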
- if present_user_data == user_data:
- module.exit_json(changed=changed, msg=user_data_list.json)
-
- # First we remove keys that are not defined in the wished user_data
- for key in present_user_data:
- if key not in user_data:
-
- changed = True
- if compute_api.module.check_mode:
- module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
-
- delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
-
- # Then we patch keys that are different
- for key, value in user_data.items():
- if key not in present_user_data or user_data[key] != present_user_data[key]:
-
- changed = True
- if compute_api.module.check_mode:
- module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
-
- patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
-
- module.exit_json(changed=changed, msg=user_data)
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- user_data=dict(type="dict"),
- server_id=dict(required=True),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
deleted file mode 100644
index e68309fc..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Scaleway volumes management module
-#
-# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: scaleway_volume
-short_description: Scaleway volumes management module
-author: Henryk Konsek (@hekonsek)
-description:
-  - This module manages volumes on a Scaleway account
- U(https://developer.scaleway.com)
-extends_documentation_fragment:
-- community.general.scaleway
-
-
-options:
- state:
- type: str
- description:
- - Indicate desired state of the volume.
- default: present
- choices:
- - present
- - absent
- region:
- type: str
- description:
-      - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
- name:
- type: str
- description:
- - Name used to identify the volume.
- required: true
- project:
- type: str
- description:
- - Scaleway project ID to which volume belongs.
- version_added: 4.3.0
- organization:
- type: str
- description:
-      - Scaleway organization ID to which volume belongs.
- size:
- type: int
- description:
- - Size of the volume in bytes.
- volume_type:
- type: str
- description:
-      - Type of the volume (for example C(l_ssd)).
-'''
-
-EXAMPLES = '''
-- name: Create 10GB volume
- community.general.scaleway_volume:
- name: my-volume
- state: present
- region: par1
- project: "{{ scw_org }}"
-    size: 10000000000
- volume_type: l_ssd
-  register: volume_creation_check_task
-
-- name: Make sure volume deleted
- community.general.scaleway_volume:
- name: my-volume
- state: absent
- region: par1
-'''
-
-RETURN = '''
-data:
- description: This is only present when C(state=present)
- returned: when C(state=present)
- type: dict
- sample: {
- "volume": {
- "export_uri": null,
- "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
- "name": "volume-0-3",
- "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
- "server": null,
- "size": 10000000000,
- "volume_type": "l_ssd"
- }
-}
-'''
-
-from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
-from ansible.module_utils.basic import AnsibleModule
-
-
-def core(module):
- region = module.params["region"]
- state = module.params['state']
- name = module.params['name']
- organization = module.params['organization']
- project = module.params['project']
- size = module.params['size']
- volume_type = module.params['volume_type']
- module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
- account_api = Scaleway(module)
- response = account_api.get('volumes')
- status_code = response.status_code
- volumes_json = response.json
-
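-    # Either I(project) or I(organization) is given (they are mutually
-    # exclusive); fall back to the organization ID when no project ID was passed.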
- if project is None:
- project = organization
-
- if not response.ok:
- module.fail_json(msg='Error getting volume [{0}: {1}]'.format(
- status_code, response.json['message']))
-
- volumeByName = None
- for volume in volumes_json['volumes']:
- if volume['project'] == project and volume['name'] == name:
- volumeByName = volume
-
- if state in ('present',):
- if volumeByName is not None:
- module.exit_json(changed=False)
-
- payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
-
- response = account_api.post('/volumes', payload)
-
- if response.ok:
- module.exit_json(changed=True, data=response.json)
-
- module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
- response.status_code, response.json))
-
- elif state in ('absent',):
- if volumeByName is None:
- module.exit_json(changed=False)
-
- if module.check_mode:
- module.exit_json(changed=True)
-
- response = account_api.delete('/volumes/' + volumeByName['id'])
- if response.status_code == 204:
- module.exit_json(changed=True, data=response.json)
-
- module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
- response.status_code, response.json))
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- state=dict(default='present', choices=['absent', 'present']),
- name=dict(required=True),
- size=dict(type='int'),
- project=dict(),
- organization=dict(),
- volume_type=dict(),
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- mutually_exclusive=[
- ('organization', 'project'),
- ],
- required_one_of=[
- ('organization', 'project'),
- ],
- )
-
- core(module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py b/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
deleted file mode 100644
index e8dfa414..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/scaleway/scaleway_volume_info.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Yanis Guenane
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: scaleway_volume_info
-short_description: Gather information about the Scaleway volumes available.
-description:
- - Gather information about the Scaleway volumes available.
-author:
- - "Yanis Guenane (@Spredzy)"
- - "Remy Leone (@remyleone)"
-extends_documentation_fragment:
-- community.general.scaleway
-
-options:
- region:
- type: str
- description:
- - Scaleway region to use (for example C(par1)).
- required: true
- choices:
- - ams1
- - EMEA-NL-EVS
- - par1
- - EMEA-FR-PAR1
- - par2
- - EMEA-FR-PAR2
- - waw1
- - EMEA-PL-WAW1
-'''
-
-EXAMPLES = r'''
-- name: Gather Scaleway volumes information
- community.general.scaleway_volume_info:
- region: par1
- register: result
-
-- ansible.builtin.debug:
- msg: "{{ result.scaleway_volume_info }}"
-'''
-
-RETURN = r'''
----
-scaleway_volume_info:
- description:
- - Response from Scaleway API.
- - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
- returned: success
- type: list
- elements: dict
- sample:
- "scaleway_volume_info": [
- {
- "creation_date": "2018-08-14T20:56:24.949660+00:00",
- "export_uri": null,
- "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
- "modification_date": "2018-08-14T20:56:24.949660+00:00",
- "name": "test-volume",
- "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
- "server": null,
- "size": 50000000000,
- "state": "available",
- "volume_type": "l_ssd"
- }
- ]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.scaleway import (
- Scaleway, ScalewayException, scaleway_argument_spec,
- SCALEWAY_LOCATION)
-
-
-class ScalewayVolumeInfo(Scaleway):
-
- def __init__(self, module):
- super(ScalewayVolumeInfo, self).__init__(module)
- self.name = 'volumes'
-
- region = module.params["region"]
- self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
-
-
-def main():
- argument_spec = scaleway_argument_spec()
- argument_spec.update(dict(
- region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
- ))
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- try:
- module.exit_json(
- scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
- )
- except ScalewayException as exc:
- module.fail_json(msg=exc.message)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
deleted file mode 100644
index 18a67d01..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/smartos/imgadm.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2016, 2017 Jasper Lievisse Adriaanse
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: imgadm
-short_description: Manage SmartOS images
-description:
- - Manage SmartOS virtual machine images through imgadm(1M)
-author: Jasper Lievisse Adriaanse (@jasperla)
-options:
- force:
- required: false
- type: bool
- description:
- - Force a given operation (where supported by imgadm(1M)).
- pool:
- required: false
- default: zones
- description:
- - zpool to import to or delete images from.
- type: str
- source:
- required: false
- description:
- - URI for the image source.
- type: str
- state:
- required: true
- choices: [ present, absent, deleted, imported, updated, vacuumed ]
- description:
-      - State the object operated on should be in. C(imported) is an alias
-        for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
- and C(uuid) to C(*), it will remove all unused images.
- type: str
-
- type:
- required: false
- choices: [ imgapi, docker, dsapi ]
- default: imgapi
- description:
- - Type for image sources.
- type: str
-
- uuid:
- required: false
- description:
- - Image UUID. Can either be a full UUID or C(*) for all images.
- type: str
-
-requirements:
- - python >= 2.6
-'''
-
-EXAMPLES = '''
-- name: Import an image
- community.general.imgadm:
- uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
- state: imported
-
-- name: Delete an image
- community.general.imgadm:
- uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
- state: deleted
-
-- name: Update all images
- community.general.imgadm:
- uuid: '*'
- state: updated
-
-- name: Update a single image
- community.general.imgadm:
- uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
- state: updated
-
-- name: Add a source
- community.general.imgadm:
- source: 'https://datasets.project-fifo.net'
- state: present
-
-- name: Add a Docker source
- community.general.imgadm:
- source: 'https://docker.io'
- type: docker
- state: present
-
-- name: Remove a source
- community.general.imgadm:
- source: 'https://docker.io'
- state: absent
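-
-# A sketch based on the documented C(vacuumed) state: combined with
-# C(uuid=*) it removes all unused images.
-- name: Remove all unused images
-  community.general.imgadm:
-    uuid: '*'
-    state: vacuumed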
-'''
-
-RETURN = '''
-source:
- description: Source that is managed.
- returned: When not managing an image.
- type: str
- sample: https://datasets.project-fifo.net
-uuid:
- description: UUID for an image operated on.
- returned: When not managing an image source.
- type: str
- sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
-state:
- description: State of the target, after execution.
- returned: success
- type: str
- sample: 'present'
-'''
-
-import re
-
-from ansible.module_utils.basic import AnsibleModule
-
-# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
-# -E option to return any errors in JSON, the generated JSON does not play well
-# with the JSON parsers of Python. The returned message contains '\n' as part of
-# the stacktrace, which breaks the parsers.
-
-
-class Imgadm(object):
- def __init__(self, module):
- self.module = module
- self.params = module.params
- self.cmd = module.get_bin_path('imgadm', required=True)
- self.changed = False
- self.uuid = module.params['uuid']
-
- # Since there are a number of (natural) aliases, prevent having to look
-        # them up every time we operate on `state`.
- if self.params['state'] in ['present', 'imported', 'updated']:
- self.present = True
- else:
- self.present = False
-
- # Perform basic UUID validation upfront.
- if self.uuid and self.uuid != '*':
- if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
- module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
-
- # Helper method to massage stderr
- def errmsg(self, stderr):
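-        # imgadm errors typically look like (assumed shape, matching the
-        # regex below):
-        #   imgadm <cmd>: error (SomeCode): <human message>: <details>
-        # Keep only the human-readable middle portion.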
- match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
- if match:
- return match.groups()[0]
- else:
- return 'Unexpected failure'
-
- def update_images(self):
- if self.uuid == '*':
- cmd = '{0} update'.format(self.cmd)
- else:
- cmd = '{0} update {1}'.format(self.cmd, self.uuid)
-
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
-
- # There is no feedback from imgadm(1M) to determine if anything
- # was actually changed. So treat this as an 'always-changes' operation.
- # Note that 'imgadm -v' produces unparseable JSON...
- self.changed = True
-
- def manage_sources(self):
- force = self.params['force']
- source = self.params['source']
- imgtype = self.params['type']
-
- cmd = '{0} sources'.format(self.cmd)
-
- if force:
- cmd += ' -f'
-
- if self.present:
- cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
-
- # Check the various responses.
- # Note that trying to add a source with the wrong type is handled
- # above as it results in a non-zero status.
-
- regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
- if re.match(regex, stdout):
- self.changed = False
-
- regex = 'Added "%s" image source "%s"' % (imgtype, source)
- if re.match(regex, stdout):
- self.changed = True
- else:
- # Type is ignored by imgadm(1M) here
- cmd += ' -d %s' % source
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
-
- regex = 'Do not have image source "%s", no change' % source
- if re.match(regex, stdout):
- self.changed = False
-
- regex = 'Deleted ".*" image source "%s"' % source
- if re.match(regex, stdout):
- self.changed = True
-
- def manage_images(self):
- pool = self.params['pool']
- state = self.params['state']
-
- if state == 'vacuumed':
- # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
- cmd = '{0} vacuum -f'.format(self.cmd)
-
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
- else:
- if stdout == '':
- self.changed = False
- else:
- self.changed = True
- if self.present:
- cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
-
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
-
- regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
- if re.match(regex, stdout):
- self.changed = False
-
- regex = '.*ActiveImageNotFound.*'
- if re.match(regex, stderr):
- self.changed = False
-
- regex = 'Imported image {0}.*'.format(self.uuid)
- if re.match(regex, stdout.splitlines()[-1]):
- self.changed = True
- else:
- cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
-
- (rc, stdout, stderr) = self.module.run_command(cmd)
-
- regex = '.*ImageNotInstalled.*'
- if re.match(regex, stderr):
- # Even if the 'rc' was non-zero (3), we handled the situation
- # in order to determine if there was a change.
- self.changed = False
-
- regex = 'Deleted image {0}'.format(self.uuid)
- if re.match(regex, stdout):
- self.changed = True
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- force=dict(type='bool'),
- pool=dict(default='zones'),
- source=dict(),
- state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
- type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
- uuid=dict()
- ),
- # This module relies largely on imgadm(1M) to enforce idempotency, which does not
- # provide a "noop" (or equivalent) mode to do a dry-run.
- supports_check_mode=False,
- )
-
- imgadm = Imgadm(module)
-
- uuid = module.params['uuid']
- source = module.params['source']
- state = module.params['state']
-
- result = {'state': state}
-
- # Either manage sources or images.
- if source:
- result['source'] = source
- imgadm.manage_sources()
- else:
- result['uuid'] = uuid
-
- if state == 'updated':
- imgadm.update_images()
- else:
-            # Make sure we operate on a single image for the following actions.
-            if (uuid == '*') and (state != 'vacuumed'):
-                module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming image(s)')
- imgadm.manage_images()
-
- result['changed'] = imgadm.changed
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
deleted file mode 100644
index 05aba6f1..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/smartos/nictagadm.py
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Bruce Smith
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: nictagadm
-short_description: Manage nic tags on SmartOS systems
-description:
- - Create or delete nic tags on SmartOS systems.
-author:
-- Bruce Smith (@SmithX10)
-options:
- name:
- description:
- - Name of the nic tag.
- required: true
- type: str
- mac:
- description:
- - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
- - Parameters I(mac) and I(etherstub) are mutually exclusive.
- type: str
- etherstub:
- description:
- - Specifies that the nic tag will be attached to a created I(etherstub).
- - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
- type: bool
- default: no
- mtu:
- description:
- - Specifies the size of the I(mtu) of the desired nic tag.
- - Parameters I(mtu) and I(etherstub) are mutually exclusive.
- type: int
- force:
- description:
-      - When I(state) is C(absent), setting this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
- type: bool
- default: no
- state:
- description:
- - Create or delete a SmartOS nic tag.
- type: str
- choices: [ absent, present ]
- default: present
-'''
-
-EXAMPLES = r'''
-- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
- community.general.nictagadm:
- name: storage0
- mac: 00:1b:21:a3:f5:4d
- mtu: 9000
- state: present
-
-- name: Remove 'storage0' nic tag
- community.general.nictagadm:
- name: storage0
- state: absent
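-
-# A sketch based on the documented I(etherstub) option, which is mutually
-# exclusive with I(mac) and I(mtu).
-- name: Create 'internal0' attached to a new etherstub
-  community.general.nictagadm:
-    name: internal0
-    etherstub: true
-    state: present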
-'''
-
-RETURN = r'''
-name:
- description: nic tag name
- returned: always
- type: str
- sample: storage0
-mac:
- description: MAC Address that the nic tag was attached to.
- returned: always
- type: str
- sample: 00:1b:21:a3:f5:4d
-etherstub:
-  description: Specifies whether the nic tag will be created on and attached to an etherstub.
- returned: always
- type: bool
- sample: False
-mtu:
-  description: Specifies which MTU size was passed during the C(nictagadm add) command. I(mtu) and I(etherstub) are mutually exclusive.
- returned: always
- type: int
- sample: 1500
-force:
- description: Shows if -f was used during the deletion of a nic tag
- returned: always
- type: bool
- sample: False
-state:
- description: state of the target
- returned: always
- type: str
- sample: present
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.network import is_mac
-
-
-class NicTag(object):
-
- def __init__(self, module):
- self.module = module
-
- self.name = module.params['name']
- self.mac = module.params['mac']
- self.etherstub = module.params['etherstub']
- self.mtu = module.params['mtu']
- self.force = module.params['force']
- self.state = module.params['state']
-
- self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
-
- def is_valid_mac(self):
- return is_mac(self.mac.lower())
-
- def nictag_exists(self):
- cmd = [self.nictagadm_bin, 'exists', self.name]
- (rc, dummy, dummy) = self.module.run_command(cmd)
-
- return rc == 0
-
- def add_nictag(self):
- cmd = [self.nictagadm_bin, '-v', 'add']
-
- if self.etherstub:
- cmd.append('-l')
-
- if self.mtu:
- cmd.append('-p')
- cmd.append('mtu=' + str(self.mtu))
-
- if self.mac:
- cmd.append('-p')
- cmd.append('mac=' + str(self.mac))
-
- cmd.append(self.name)
-
- return self.module.run_command(cmd)
-
- def delete_nictag(self):
- cmd = [self.nictagadm_bin, '-v', 'delete']
-
- if self.force:
- cmd.append('-f')
-
- cmd.append(self.name)
-
- return self.module.run_command(cmd)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(type='str', required=True),
- mac=dict(type='str'),
- etherstub=dict(type='bool', default=False),
- mtu=dict(type='int'),
- force=dict(type='bool', default=False),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- ),
- mutually_exclusive=[
- ['etherstub', 'mac'],
- ['etherstub', 'mtu'],
- ],
- required_if=[
- ['etherstub', False, ['name', 'mac']],
- ['state', 'absent', ['name', 'force']],
- ],
- supports_check_mode=True
- )
-
- nictag = NicTag(module)
-
- rc = None
- out = ''
- err = ''
- result = dict(
- changed=False,
- etherstub=nictag.etherstub,
- force=nictag.force,
- name=nictag.name,
- mac=nictag.mac,
- mtu=nictag.mtu,
- state=nictag.state,
- )
-
-    # I(mac) is optional (for example when creating an etherstub), so only
-    # validate it when it was given.
-    if nictag.mac and not nictag.is_valid_mac():
- module.fail_json(msg='Invalid MAC Address Value',
- name=nictag.name,
- mac=nictag.mac,
- etherstub=nictag.etherstub)
-
- if nictag.state == 'absent':
- if nictag.nictag_exists():
- if module.check_mode:
- module.exit_json(changed=True)
- (rc, out, err) = nictag.delete_nictag()
- if rc != 0:
- module.fail_json(name=nictag.name, msg=err, rc=rc)
- elif nictag.state == 'present':
- if not nictag.nictag_exists():
- if module.check_mode:
- module.exit_json(changed=True)
- (rc, out, err) = nictag.add_nictag()
- if rc is not None and rc != 0:
- module.fail_json(name=nictag.name, msg=err, rc=rc)
-
- if rc is not None:
- result['changed'] = True
- if out:
- result['stdout'] = out
- if err:
- result['stderr'] = err
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
deleted file mode 100644
index 369559f5..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/smartos/smartos_image_info.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2015, Adam Števko
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: smartos_image_info
-short_description: Get SmartOS image details.
-description:
- - Retrieve information about all installed images on SmartOS.
- - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
- Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
-author: Adam Števko (@xen0l)
-options:
- filters:
- description:
-      - Criteria for selecting an image. Can be any value from the image
-        manifest and C(published_date), C(published), C(source), C(clones),
-        and C(size). More information can be found at U(https://smartos.org/man/1m/imgadm)
- under 'imgadm list'.
- type: str
-'''
-
-EXAMPLES = '''
-- name: Return information about all installed images
- community.general.smartos_image_info:
- register: result
-
-- name: Return all private active Linux images
- community.general.smartos_image_info:
- filters: "os=linux state=active public=false"
- register: result
-
-- name: Show how many clones every image has
- community.general.smartos_image_info:
- register: result
-
-- name: Print information
- ansible.builtin.debug:
- msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
- has {{ result.smartos_images[item]['clones'] }} VM(s)"
- with_items: "{{ result.smartos_images.keys() | list }}"
-
-- name: Print information
- ansible.builtin.debug:
- msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
- has {{ smartos_images[item]['clones'] }} VM(s)"
- with_items: "{{ smartos_images.keys() | list }}"
-'''
-
-RETURN = '''
-'''
-
-import json
-from ansible.module_utils.basic import AnsibleModule
-
-
-class ImageFacts(object):
-
- def __init__(self, module):
- self.module = module
-
- self.filters = module.params['filters']
-
- def return_all_installed_images(self):
- cmd = [self.module.get_bin_path('imgadm'), 'list', '-j']
-
- if self.filters:
- cmd.append(self.filters)
-
- (rc, out, err) = self.module.run_command(cmd)
-
- if rc != 0:
- self.module.exit_json(
- msg='Failed to get all installed images', stderr=err)
-
- images = json.loads(out)
-
- result = {}
- for image in images:
- result[image['manifest']['uuid']] = image['manifest']
- # Merge additional attributes with the image manifest.
- for attrib in ['clones', 'source', 'zpool']:
- result[image['manifest']['uuid']][attrib] = image[attrib]
-
- return result
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- filters=dict(default=None),
- ),
- supports_check_mode=True,
- )
-
- image_facts = ImageFacts(module)
-
- data = dict(smartos_images=image_facts.return_all_installed_images())
-
- module.exit_json(**data)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py b/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
deleted file mode 100644
index 03a02242..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/smartos/vmadm.py
+++ /dev/null
@@ -1,803 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Jasper Lievisse Adriaanse
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: vmadm
-short_description: Manage SmartOS virtual machines and zones.
-description:
- - Manage SmartOS virtual machines through vmadm(1M).
-author: Jasper Lievisse Adriaanse (@jasperla)
-options:
- archive_on_delete:
- required: false
- description:
- - When enabled, the zone dataset will be mounted on C(/zones/archive)
- upon removal.
- type: bool
- autoboot:
- required: false
- description:
- - Whether or not a VM is booted when the system is rebooted.
- type: bool
- brand:
- choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
- default: joyent
- description:
- - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
- type: str
- boot:
- required: false
- description:
- - Set the boot order for KVM VMs.
- type: str
- cpu_cap:
- required: false
- description:
- - Sets a limit on the amount of CPU time that can be used by a VM.
- Use C(0) for no cap.
- type: int
- cpu_shares:
- required: false
- description:
- - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
- a VM. This limit is relative to all other VMs on the system.
- type: int
- cpu_type:
- required: false
- choices: [ qemu64, host ]
- default: qemu64
- description:
- - Control the type of virtual CPU exposed to KVM VMs.
- type: str
- customer_metadata:
- required: false
- description:
-      - Metadata to be set and associated with this VM; this contains
-        customer-modifiable keys.
- type: dict
- delegate_dataset:
- required: false
- description:
- - Whether to delegate a ZFS dataset to an OS VM.
- type: bool
- disk_driver:
- required: false
- description:
- - Default value for a virtual disk model for KVM guests.
- type: str
- disks:
- required: false
- description:
- - A list of disks to add, valid properties are documented in vmadm(1M).
- type: list
- elements: dict
- dns_domain:
- required: false
- description:
- - Domain value for C(/etc/hosts).
- type: str
- docker:
- required: false
- description:
- - Docker images need this flag enabled along with the I(brand) set to C(lx).
- type: bool
- filesystems:
- required: false
- description:
- - Mount additional filesystems into an OS VM.
- type: list
- elements: dict
- firewall_enabled:
- required: false
- description:
- - Enables the firewall, allowing fwadm(1M) rules to be applied.
- type: bool
- force:
- required: false
- description:
- - Force a particular action (i.e. stop or delete a VM).
- type: bool
- fs_allowed:
- required: false
- description:
- - Comma separated list of filesystem types this zone is allowed to mount.
- type: str
- hostname:
- required: false
- description:
- - Zone/VM hostname.
- type: str
- image_uuid:
- required: false
- description:
- - Image UUID.
- type: str
- indestructible_delegated:
- required: false
- description:
- - Adds an C(@indestructible) snapshot to delegated datasets.
- type: bool
- indestructible_zoneroot:
- required: false
- description:
- - Adds an C(@indestructible) snapshot to zoneroot.
- type: bool
- internal_metadata:
- required: false
- description:
-      - Metadata to be set and associated with this VM; this contains
-        operator-generated keys.
- type: dict
- internal_metadata_namespace:
- required: false
- description:
- - List of namespaces to be set as I(internal_metadata-only); these namespaces
- will come from I(internal_metadata) rather than I(customer_metadata).
- type: str
- kernel_version:
- required: false
- description:
- - Kernel version to emulate for LX VMs.
- type: str
- limit_priv:
- required: false
- description:
- - Set (comma separated) list of privileges the zone is allowed to use.
- type: str
- maintain_resolvers:
- required: false
- description:
- - Resolvers in C(/etc/resolv.conf) will be updated when updating
- the I(resolvers) property.
- type: bool
- max_locked_memory:
- required: false
- description:
- - Total amount of memory (in MiBs) on the host that can be locked by this VM.
- type: int
- max_lwps:
- required: false
- description:
- - Maximum number of lightweight processes this VM is allowed to have running.
- type: int
- max_physical_memory:
- required: false
- description:
- - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
- type: int
- max_swap:
- required: false
- description:
- - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
- type: int
- mdata_exec_timeout:
- required: false
- description:
- - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
- that runs user-scripts in the zone.
- type: int
- name:
- required: false
- aliases: [ alias ]
- description:
- - Name of the VM. vmadm(1M) uses this as an optional name.
- type: str
- nic_driver:
- required: false
- description:
- - Default value for a virtual NIC model for KVM guests.
- type: str
- nics:
- required: false
- description:
- - A list of nics to add, valid properties are documented in vmadm(1M).
- type: list
- elements: dict
- nowait:
- required: false
- description:
- - Consider the provisioning complete when the VM first starts, rather than
- when the VM has rebooted.
- type: bool
- qemu_opts:
- required: false
- description:
- - Additional qemu arguments for KVM guests. This overwrites the default arguments
- provided by vmadm(1M) and should only be used for debugging.
- type: str
- qemu_extra_opts:
- required: false
- description:
- - Additional qemu cmdline arguments for KVM guests.
- type: str
- quota:
- required: false
- description:
- - Quota on zone filesystems (in MiBs).
- type: int
- ram:
- required: false
- description:
- - Amount of virtual RAM for a KVM guest (in MiBs).
- type: int
- resolvers:
- required: false
- description:
- - List of resolvers to be put into C(/etc/resolv.conf).
- type: list
- elements: str
- routes:
- required: false
- description:
- - Dictionary that maps destinations to gateways, these will be set as static
- routes in the VM.
- type: dict
- spice_opts:
- required: false
- description:
- - Addition options for SPICE-enabled KVM VMs.
- type: str
- spice_password:
- required: false
- description:
- - Password required to connect to SPICE. By default no password is set.
- Please note this can be read from the Global Zone.
- type: str
- state:
- choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
- default: running
- description:
- - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
- operate on a VM that is currently provisioned. C(present) means that the VM will be
- created if it was absent, and that it will be in a running state. C(absent) will
-        shut down the zone before removing it.
- C(stopped) means the zone will be created if it doesn't exist already, before shutting
- it down.
- type: str
- tmpfs:
- required: false
- description:
- - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
- type: int
- uuid:
- required: false
- description:
- - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
- type: str
- vcpus:
- required: false
- description:
- - Number of virtual CPUs for a KVM guest.
- type: int
- vga:
- required: false
- description:
- - Specify VGA emulation used by KVM VMs.
- type: str
- virtio_txburst:
- required: false
- description:
- - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
- type: int
- virtio_txtimer:
- required: false
- description:
- - Timeout (in nanoseconds) for the TX timer of virtio NICs.
- type: int
- vnc_password:
- required: false
- description:
- - Password required to connect to VNC. By default no password is set.
- Please note this can be read from the Global Zone.
- type: str
- vnc_port:
- required: false
- description:
-      - TCP port for the VNC server to listen on. Set C(0) for random,
- or C(-1) to disable.
- type: int
- zfs_data_compression:
- required: false
- description:
-      - Specifies the compression algorithm used for this VM's data dataset. This option
- only has effect on delegated datasets.
- type: str
- zfs_data_recsize:
- required: false
- description:
- - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
- type: int
- zfs_filesystem_limit:
- required: false
- description:
- - Maximum number of filesystems the VM can have.
- type: int
- zfs_io_priority:
- required: false
- description:
- - IO throttle priority value relative to other VMs.
- type: int
- zfs_root_compression:
- required: false
- description:
-      - Specifies the compression algorithm used for this VM's root dataset. This option
- only has effect on the zoneroot dataset.
- type: str
- zfs_root_recsize:
- required: false
- description:
- - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
- type: int
- zfs_snapshot_limit:
- required: false
- description:
- - Number of snapshots the VM can have.
- type: int
- zpool:
- required: false
- description:
- - ZFS pool the VM's zone dataset will be created in.
- type: str
-requirements:
- - python >= 2.6
-'''
-
-EXAMPLES = '''
-- name: Create SmartOS zone
- community.general.vmadm:
- brand: joyent
- state: present
- alias: fw_zone
- image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
- firewall_enabled: yes
- indestructible_zoneroot: yes
- nics:
- - nic_tag: admin
- ip: dhcp
- primary: true
- internal_metadata:
- root_pw: 'secret'
- quota: 1
-
-- name: Delete a zone
- community.general.vmadm:
- alias: test_zone
- state: deleted
-
-- name: Stop all zones
- community.general.vmadm:
- uuid: '*'
- state: stopped
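-
-# A minimal sketch using the documented C(restarted) state, which operates
-# on an already provisioned VM addressed by its alias.
-- name: Restart a zone
-  community.general.vmadm:
-    alias: fw_zone
-    state: restarted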
-'''
-
-RETURN = '''
-uuid:
- description: UUID of the managed VM.
- returned: always
- type: str
- sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
-alias:
- description: Alias of the managed VM.
- returned: When addressing a VM by alias.
- type: str
- sample: 'dns-zone'
-state:
- description: State of the target, after execution.
- returned: success
- type: str
- sample: 'running'
-'''
-
-import json
-import os
-import re
-import tempfile
-import traceback
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-
-# While vmadm(1M) supports a -E option to return any errors in JSON, the
-# generated JSON does not play well with the JSON parsers of Python.
-# The returned message contains '\n' as part of the stacktrace,
-# which breaks the parsers.
-
-
-def get_vm_prop(module, uuid, prop):
- # Lookup a property for the given VM.
- # Returns the property, or None if not found.
- cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
-
- (rc, stdout, stderr) = module.run_command(cmd)
-
- if rc != 0:
- module.fail_json(
- msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
-
- try:
- stdout_json = json.loads(stdout)
- except Exception as e:
- module.fail_json(
- msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
- details=to_native(e), exception=traceback.format_exc())
-
- if len(stdout_json) > 0 and prop in stdout_json[0]:
- return stdout_json[0][prop]
- else:
- return None
-
-
-def get_vm_uuid(module, alias):
- # Lookup the uuid that goes with the given alias.
-    # Returns the uuid or None if not found.
- cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
-
- (rc, stdout, stderr) = module.run_command(cmd)
-
- if rc != 0:
- module.fail_json(
- msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
-
- # If no VM was found matching the given alias, we get back an empty array.
-    # That is not an error condition as we might be explicitly checking its
- # absence.
- if stdout.strip() == '[]':
- return None
- else:
- try:
- stdout_json = json.loads(stdout)
- except Exception as e:
- module.fail_json(
- msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
- details=to_native(e), exception=traceback.format_exc())
-
- if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
- return stdout_json[0]['uuid']
-
-
-def get_all_vm_uuids(module):
- # Retrieve the UUIDs for all VMs.
- cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
-
- (rc, stdout, stderr) = module.run_command(cmd)
-
- if rc != 0:
- module.fail_json(msg='Failed to get VMs list', exception=stderr)
-
- try:
- stdout_json = json.loads(stdout)
- return [v['uuid'] for v in stdout_json]
- except Exception as e:
- module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
- exception=traceback.format_exc())
-
-
-def new_vm(module, uuid, vm_state):
- payload_file = create_payload(module, uuid)
-
- (rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
-
- if rc != 0:
- changed = False
- module.fail_json(msg='Could not create VM', exception=stderr)
- else:
- changed = True
- # 'vmadm create' returns all output to stderr...
- match = re.match('Successfully created VM (.*)', stderr)
- if match:
- vm_uuid = match.groups()[0]
- if not is_valid_uuid(vm_uuid):
- module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
- else:
- module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
-
- # Now that the VM is created, ensure it is in the desired state (if not 'running')
- if vm_state != 'running':
- ret = set_vm_state(module, vm_uuid, vm_state)
- if not ret:
- module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
-
- try:
- os.unlink(payload_file)
- except Exception as e:
- # Since the payload may contain sensitive information, fail hard
- # if we cannot remove the file so the operator knows about it.
- module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
- exception=traceback.format_exc())
-
- return changed, vm_uuid
-
-
-def vmadm_create_vm(module, payload_file):
- # Create a new VM using the provided payload.
- cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
-
- return module.run_command(cmd)
-
-
-def set_vm_state(module, vm_uuid, vm_state):
- p = module.params
-
- # Check if the VM is already in the desired state.
- state = get_vm_prop(module, vm_uuid, 'state')
- if state and (state == vm_state):
- return None
-
- # Lookup table for the state to be in, and which command to use for that.
- # vm_state: [vmadm command, forceable?]
- cmds = {
- 'stopped': ['stop', True],
- 'running': ['start', False],
- 'deleted': ['delete', True],
- 'rebooted': ['reboot', False]
- }
-
- if p['force'] and cmds[vm_state][1]:
- force = '-F'
- else:
- force = ''
-
- cmd = '{0} {1} {2} {3}'.format(module.vmadm, cmds[vm_state][0], force, vm_uuid)
-
- (rc, stdout, stderr) = module.run_command(cmd)
-
- match = re.match('^Successfully.*', stderr)
- if match:
- return True
- else:
- return False
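
The `cmds` table above pairs each target state with its vmadm subcommand and whether that subcommand honours `-F`. The same dispatch in isolation (the UUID below is a placeholder):

    # state -> [vmadm subcommand, accepts -F?], as in set_vm_state()
    CMDS = {
        'stopped': ['stop', True],
        'running': ['start', False],
        'deleted': ['delete', True],
        'rebooted': ['reboot', False],
    }

    def build_cmd(vm_state, uuid, force_requested):
        # Only pass -F when it was requested AND the subcommand supports it.
        force = '-F' if force_requested and CMDS[vm_state][1] else ''
        return 'vmadm {0} {1} {2}'.format(CMDS[vm_state][0], force, uuid)

    print(build_cmd('stopped', '00000000-0000-0000-0000-000000000000', True))
    print(build_cmd('running', '00000000-0000-0000-0000-000000000000', True))
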
-
-
-def create_payload(module, uuid):
- # Create the JSON payload (vmdef) and return the filename.
-
- # Filter out the few options that are not valid VM properties.
- module_options = ['debug', 'force', 'state']
- # @TODO make this a simple {} comprehension as soon as py2 is ditched
- # @TODO {k: v for k, v in p.items() if k not in module_options}
- vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
-
- try:
- vmdef_json = json.dumps(vmdef)
- except Exception as e:
- module.fail_json(
- msg='Could not create valid JSON payload', exception=traceback.format_exc())
-
- # Create the temporary file that contains our payload, and set tight
- # permissions, as it may contain sensitive information.
- try:
- # XXX: When there's a way to get the current ansible temporary directory
- # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
- # the payload (thus removing the `save_payload` option).
- fname = tempfile.mkstemp()[1]
- os.chmod(fname, 0o400)
- with open(fname, 'w') as fh:
- fh.write(vmdef_json)
- except Exception as e:
- module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
-
- return fname
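
A note on the temp-file handling: mkstemp() returns an already-open descriptor, but the code above discards it and reopens the path by name after chmod'ing it to 0400. That reopen works in practice only because vmadm is typically driven as root; a 0400 file cannot normally be reopened for writing, even by its owner. A sketch that keeps the descriptor instead, avoiding both the reopen and the fd leak (the payload is hypothetical):

    import json
    import os
    import tempfile

    def write_private_json(payload):
        # Owner-read-only temp file; write through the original descriptor,
        # so the chmod cannot lock us out of our own file.
        fd, fname = tempfile.mkstemp()
        os.chmod(fname, 0o400)
        with os.fdopen(fd, 'w') as fh:
            fh.write(json.dumps(payload))
        return fname

    path = write_private_json({'alias': 'demo-vm', 'ram': 1024})
    print(path, oct(os.stat(path).st_mode & 0o777))
    os.unlink(path)
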
-
-
-def vm_state_transition(module, uuid, vm_state):
- ret = set_vm_state(module, uuid, vm_state)
-
- # Whether the VM changed state.
- if ret is None:
- return False
- elif ret:
- return True
- else:
- module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
-
-
-def is_valid_uuid(uuid):
- if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
- return True
- else:
- return False
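
The pattern accepts the canonical 8-4-4-4-12 hex form in either case. A quick check:

    import re

    UUID_RE = re.compile('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', re.IGNORECASE)

    for candidate in ('6F7A4C2B-1D3E-4A5B-8C9D-0E1F2A3B4C5D',  # valid, upper case
                      '6f7a4c2b-1d3e-4a5b-8c9d-0e1f2a3b4c5d',  # valid, lower case
                      'not-a-uuid'):
        print(candidate, bool(UUID_RE.match(candidate)))
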
-
-
-def validate_uuids(module):
- # Perform basic UUID validation.
- failed = []
-
- for u in [['uuid', module.params['uuid']],
- ['image_uuid', module.params['image_uuid']]]:
- if u[1] and u[1] != '*':
- if not is_valid_uuid(u[1]):
- failed.append(u[0])
-
- if len(failed) > 0:
- module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
-
-
-def manage_all_vms(module, vm_state):
- # Handle operations for all VMs, which can by definition only
- # be state transitions.
- state = module.params['state']
-
- if state == 'created':
- module.fail_json(msg='State "created" is only valid for tasks with a single VM')
-
- # If any of the VMs has a change, the task as a whole has a change.
- any_changed = False
-
- # First get all VM uuids and for each check their state, and adjust it if needed.
- for uuid in get_all_vm_uuids(module):
- current_vm_state = get_vm_prop(module, uuid, 'state')
- if not current_vm_state and vm_state == 'deleted':
- # Already absent: no change for this VM. Leave any_changed alone,
- # or a change recorded for an earlier VM would be lost.
- pass
- else:
- if module.check_mode:
- if (not current_vm_state) or (current_vm_state != vm_state):
- any_changed = True
- else:
- any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
-
- return any_changed
-
-
-def main():
- # In order to reduce the clutter and boilerplate for trivial options,
- # abstract the vmadm properties and build the dict of arguments later.
- # Dict of all options that are simple to define based on their type.
- # They're not required and have a default of None.
- properties = {
- 'str': [
- 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
- 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
- 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
- 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
- 'zfs_root_compression', 'zpool'
- ],
- 'bool': [
- 'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
- 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
- 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
- ],
- 'int': [
- 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
- 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
- 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
- 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
- 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
- 'zfs_snapshot_limit'
- ],
- 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
- }
-
- # Start with the options that are not as trivial as those above.
- options = dict(
- state=dict(
- default='running',
- type='str',
- choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
- ),
- name=dict(
- default=None, type='str',
- aliases=['alias']
- ),
- brand=dict(
- default='joyent',
- type='str',
- choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
- ),
- cpu_type=dict(
- default='qemu64',
- type='str',
- choices=['host', 'qemu64']
- ),
- # Regular strings, however these require additional options.
- spice_password=dict(type='str', no_log=True),
- vnc_password=dict(type='str', no_log=True),
- disks=dict(type='list', elements='dict'),
- nics=dict(type='list', elements='dict'),
- resolvers=dict(type='list', elements='str'),
- filesystems=dict(type='list', elements='dict'),
- )
-
- # Add our 'simple' options to options dict.
- for opt_type in properties:
- for p in properties[opt_type]:
- option = dict(default=None, type=opt_type)
- options[p] = option
-
- module = AnsibleModule(
- argument_spec=options,
- supports_check_mode=True,
- required_one_of=[['name', 'uuid']]
- )
-
- module.vmadm = module.get_bin_path('vmadm', required=True)
-
- p = module.params
- uuid = p['uuid']
- state = p['state']
-
- # Translate the state parameter into something we can use later on.
- if state in ['present', 'running']:
- vm_state = 'running'
- elif state in ['stopped', 'created']:
- vm_state = 'stopped'
- elif state in ['absent', 'deleted']:
- vm_state = 'deleted'
- elif state in ['restarted', 'rebooted']:
- vm_state = 'rebooted'
-
- result = {'state': state}
-
- # While it's possible to refer to a given VM by its `alias`, it's easier
- # to operate on VMs by their UUID. So if we're not given a `uuid`, look
- # it up.
- if not uuid:
- uuid = get_vm_uuid(module, p['name'])
- # Bit of a chicken and egg problem here for VMs with state == deleted.
- # If they're going to be removed in this play, we have to lookup the
- # uuid. If they're already deleted there's nothing to lookup.
- # So if state == deleted and get_vm_uuid() returned None, the VM is already
- # deleted and there's nothing else to do.
- if uuid is None and vm_state == 'deleted':
- result['name'] = p['name']
- module.exit_json(**result)
-
- validate_uuids(module)
-
- if p['name']:
- result['name'] = p['name']
- result['uuid'] = uuid
-
- if uuid == '*':
- result['changed'] = manage_all_vms(module, vm_state)
- module.exit_json(**result)
-
- # The general flow is as follows:
- # - First the current state of the VM is obtained by its UUID.
- # - If the state was not found and the desired state is 'deleted', return.
- # - If the state was not found, it means the VM has to be created.
- # Subsequently the VM will be set to the desired state (i.e. stopped)
- # - Otherwise, it means the VM exists already and we operate on its
- # state (i.e. reboot it.)
- #
- # In the future it should be possible to query the VM for a particular
- # property as a valid state (i.e. queried) so the result can be
- # registered.
- # Also, VMs should be able to get their properties updated.
- # Managing VM snapshots should be part of a standalone module.
-
- # First obtain the VM state to determine what needs to be done with it.
- current_vm_state = get_vm_prop(module, uuid, 'state')
-
- # First handle the case where the VM should be deleted and is not present.
- if not current_vm_state and vm_state == 'deleted':
- result['changed'] = False
- elif module.check_mode:
- # Shortcut for check mode, if there is no VM yet, it will need to be created.
- # Or, if the VM is not in the desired state yet, it needs to transition.
- if (not current_vm_state) or (current_vm_state != vm_state):
- result['changed'] = True
- else:
- result['changed'] = False
-
- module.exit_json(**result)
- # No VM was found that matched the given ID (alias or uuid), so we create it.
- elif not current_vm_state:
- result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
- else:
- # VM was found, operate on its state directly.
- result['changed'] = vm_state_transition(module, uuid, vm_state)
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
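
One readability note on main(): the if/elif chain that maps the eight user-facing states onto four internal ones is equivalent to a small lookup table, which is easier to audit at a glance. A sketch, not the module's code:

    # user-facing state -> internal vm_state, same pairs as the if/elif chain
    STATE_MAP = {
        'present': 'running', 'running': 'running',
        'stopped': 'stopped', 'created': 'stopped',
        'absent': 'deleted', 'deleted': 'deleted',
        'restarted': 'rebooted', 'rebooted': 'rebooted',
    }

    print(STATE_MAP['present'])  # running
    print(STATE_MAP['created'])  # stopped
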
diff --git a/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py b/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
deleted file mode 100644
index 825d82e1..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/softlayer/sl_vm.py
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: sl_vm
-short_description: create or cancel a virtual instance in SoftLayer
-description:
- - Creates or cancels SoftLayer instances.
- - When created, optionally waits for it to be 'running'.
-options:
- instance_id:
- description:
- - Instance ID of the virtual instance on which to perform the action.
- type: str
- hostname:
- description:
- - Hostname to be provided to a virtual instance.
- type: str
- domain:
- description:
- - Domain name to be provided to a virtual instance.
- type: str
- datacenter:
- description:
- - Datacenter for the virtual instance to be deployed.
- type: str
- choices:
- - ams01
- - ams03
- - che01
- - dal01
- - dal05
- - dal06
- - dal09
- - dal10
- - dal12
- - dal13
- - fra02
- - fra04
- - fra05
- - hkg02
- - hou02
- - lon02
- - lon04
- - lon06
- - mel01
- - mex01
- - mil01
- - mon01
- - osl01
- - par01
- - sao01
- - sea01
- - seo01
- - sjc01
- - sjc03
- - sjc04
- - sng01
- - syd01
- - syd04
- - tok02
- - tor01
- - wdc01
- - wdc04
- - wdc06
- - wdc07
- tags:
- description:
- - Tag or list of tags to be provided to a virtual instance.
- type: str
- hourly:
- description:
- - Flag to determine if the instance should be hourly billed.
- type: bool
- default: 'yes'
- private:
- description:
- - Flag to determine if the instance should be private only.
- type: bool
- default: 'no'
- dedicated:
- description:
- - Flag to determine if the instance should be deployed in dedicated space.
- type: bool
- default: 'no'
- local_disk:
- description:
- - Flag to determine if local disk should be used for the new instance.
- type: bool
- default: 'yes'
- cpus:
- description:
- - Count of cpus to be assigned to new virtual instance.
- type: int
- choices: [1, 2, 4, 8, 16, 32, 56]
- memory:
- description:
- - Amount of memory to be assigned to new virtual instance.
- type: int
- choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
- flavor:
- description:
- - Specify which SoftLayer flavor template to use instead of cpus and memory.
- version_added: '0.2.0'
- type: str
- disks:
- description:
- - List of disk sizes to be assigned to new virtual instance.
- default: [ 25 ]
- type: list
- elements: int
- os_code:
- description:
- - OS Code to be used for new virtual instance.
- type: str
- image_id:
- description:
- - Image Template to be used for new virtual instance.
- type: str
- nic_speed:
- description:
- - NIC Speed to be assigned to new virtual instance.
- choices: [10, 100, 1000]
- type: int
- public_vlan:
- description:
- - VLAN by its Id to be assigned to the public NIC.
- type: str
- private_vlan:
- description:
- - VLAN by its Id to be assigned to the private NIC.
- type: str
- ssh_keys:
- description:
- - List of ssh keys by their Id to be assigned to a virtual instance.
- type: list
- elements: str
- post_uri:
- description:
- - URL of a post provisioning script to be loaded and executed on virtual instance.
- type: str
- state:
- description:
- - Create, or cancel a virtual instance.
- - Specify C(present) for create, C(absent) to cancel.
- choices: [ absent, present ]
- default: present
- type: str
- wait:
- description:
- - Flag used to wait for active status before returning.
- type: bool
- default: 'yes'
- wait_time:
- description:
- - Time in seconds before wait returns.
- default: 600
- type: int
-requirements:
- - python >= 2.6
- - softlayer >= 4.1.1
-author:
-- Matt Colton (@mcltn)
-'''
-
-EXAMPLES = '''
-- name: Build instance
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Build instance request
- community.general.sl_vm:
- hostname: instance-1
- domain: anydomain.com
- datacenter: dal09
- tags: ansible-module-test
- hourly: yes
- private: no
- dedicated: no
- local_disk: yes
- cpus: 1
- memory: 1024
- disks: [25]
- os_code: UBUNTU_LATEST
- wait: no
-
-- name: Build additional instances
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Build instances request
- community.general.sl_vm:
- hostname: "{{ item.hostname }}"
- domain: "{{ item.domain }}"
- datacenter: "{{ item.datacenter }}"
- tags: "{{ item.tags }}"
- hourly: "{{ item.hourly }}"
- private: "{{ item.private }}"
- dedicated: "{{ item.dedicated }}"
- local_disk: "{{ item.local_disk }}"
- cpus: "{{ item.cpus }}"
- memory: "{{ item.memory }}"
- disks: "{{ item.disks }}"
- os_code: "{{ item.os_code }}"
- ssh_keys: "{{ item.ssh_keys }}"
- wait: "{{ item.wait }}"
- with_items:
- - hostname: instance-2
- domain: anydomain.com
- datacenter: dal09
- tags:
- - ansible-module-test
- - ansible-module-test-replicas
- hourly: yes
- private: no
- dedicated: no
- local_disk: yes
- cpus: 1
- memory: 1024
- disks:
- - 25
- - 100
- os_code: UBUNTU_LATEST
- ssh_keys: []
- wait: True
- - hostname: instance-3
- domain: anydomain.com
- datacenter: dal09
- tags:
- - ansible-module-test
- - ansible-module-test-replicas
- hourly: yes
- private: no
- dedicated: no
- local_disk: yes
- cpus: 1
- memory: 1024
- disks:
- - 25
- - 100
- os_code: UBUNTU_LATEST
- ssh_keys: []
- wait: yes
-
-- name: Cancel instances
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Cancel by tag
- community.general.sl_vm:
- state: absent
- tags: ansible-module-test
-'''
-
-# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
-RETURN = '''# '''
-
-import json
-import time
-
-try:
- import SoftLayer
- from SoftLayer import VSManager
-
- HAS_SL = True
- vsManager = VSManager(SoftLayer.create_client_from_env())
-except ImportError:
- HAS_SL = False
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import string_types
-
-
-# TODO: get this info from API
-STATES = ['present', 'absent']
-DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
- 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
- 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
- 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
-CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
-MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
-INITIALDISK_SIZES = [25, 100]
-LOCALDISK_SIZES = [25, 100, 150, 200, 300]
-SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
-NIC_SPEEDS = [10, 100, 1000]
-
-
-def create_virtual_instance(module):
-
- instances = vsManager.list_instances(
- hostname=module.params.get('hostname'),
- domain=module.params.get('domain'),
- datacenter=module.params.get('datacenter')
- )
-
- if instances:
- return False, None
-
- # Check if OS or Image Template is provided (Can't be both, defaults to OS)
- if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
- module.params['image_id'] = ''
- elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
- module.params['os_code'] = ''
- module.params['disks'] = [] # Blank out disks since it will use the template
- else:
- return False, None
-
- tags = module.params.get('tags')
- if isinstance(tags, list):
- tags = ','.join(map(str, module.params.get('tags')))
-
- instance = vsManager.create_instance(
- hostname=module.params.get('hostname'),
- domain=module.params.get('domain'),
- cpus=module.params.get('cpus'),
- memory=module.params.get('memory'),
- flavor=module.params.get('flavor'),
- hourly=module.params.get('hourly'),
- datacenter=module.params.get('datacenter'),
- os_code=module.params.get('os_code'),
- image_id=module.params.get('image_id'),
- local_disk=module.params.get('local_disk'),
- disks=module.params.get('disks'),
- ssh_keys=module.params.get('ssh_keys'),
- nic_speed=module.params.get('nic_speed'),
- private=module.params.get('private'),
- public_vlan=module.params.get('public_vlan'),
- private_vlan=module.params.get('private_vlan'),
- dedicated=module.params.get('dedicated'),
- post_uri=module.params.get('post_uri'),
- tags=tags,
- )
-
- if instance is not None and instance['id'] > 0:
- return True, instance
- else:
- return False, None
-
-
-def wait_for_instance(module, instance_id):
- instance = None
- completed = False
- wait_timeout = time.time() + module.params.get('wait_time')
- while not completed and wait_timeout > time.time():
- try:
- completed = vsManager.wait_for_ready(instance_id, 10, 2)
- if completed:
- instance = vsManager.get_instance(instance_id)
- except Exception:
- completed = False
-
- return completed, instance
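
wait_for_instance() is a standard poll-until-deadline loop: probe, treat errors as "not ready yet", retry until the deadline passes. The same shape in isolation, with a hypothetical probe function:

    import time

    def wait_until(probe, timeout_s, interval_s=1):
        # Poll probe() until it returns truthy or the deadline passes;
        # probe errors count as "not ready yet", like the module does.
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            try:
                result = probe()
                if result:
                    return True, result
            except Exception:
                pass
            time.sleep(interval_s)
        return False, None

    start = time.time()
    ok, _ = wait_until(lambda: time.time() - start > 3, timeout_s=10)
    print(ok)  # True (the hypothetical probe turns truthy after ~3s)
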
-
-
-def cancel_instance(module):
- canceled = True
- if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
- tags = module.params.get('tags')
- if isinstance(tags, string_types):
- tags = [module.params.get('tags')]
- instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
- for instance in instances:
- try:
- vsManager.cancel_instance(instance['id'])
- except Exception:
- canceled = False
- elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
- try:
- vsManager.cancel_instance(module.params.get('instance_id'))
- except Exception:
- canceled = False
- else:
- return False, None
-
- return canceled, None
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- instance_id=dict(type='str'),
- hostname=dict(type='str'),
- domain=dict(type='str'),
- datacenter=dict(type='str', choices=DATACENTERS),
- tags=dict(type='str'),
- hourly=dict(type='bool', default=True),
- private=dict(type='bool', default=False),
- dedicated=dict(type='bool', default=False),
- local_disk=dict(type='bool', default=True),
- cpus=dict(type='int', choices=CPU_SIZES),
- memory=dict(type='int', choices=MEMORY_SIZES),
- flavor=dict(type='str'),
- disks=dict(type='list', elements='int', default=[25]),
- os_code=dict(type='str'),
- image_id=dict(type='str'),
- nic_speed=dict(type='int', choices=NIC_SPEEDS),
- public_vlan=dict(type='str'),
- private_vlan=dict(type='str'),
- ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
- post_uri=dict(type='str'),
- state=dict(type='str', default='present', choices=STATES),
- wait=dict(type='bool', default=True),
- wait_time=dict(type='int', default=600),
- )
- )
-
- if not HAS_SL:
- module.fail_json(msg='softlayer python library required for this module')
-
- if module.params.get('state') == 'absent':
- (changed, instance) = cancel_instance(module)
-
- elif module.params.get('state') == 'present':
- (changed, instance) = create_virtual_instance(module)
- if module.params.get('wait') is True and instance:
- (changed, instance) = wait_for_instance(module, instance['id'])
-
- module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
-
-
-if __name__ == '__main__':
- main()
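
For context, everything in this module goes through SoftLayer's VSManager, which is built once at import time from environment credentials. A minimal sketch using the same calls the module relies on (it needs the softlayer package and SoftLayer credentials in the environment; the hostname/domain values are hypothetical):

    import SoftLayer
    from SoftLayer import VSManager

    # Same construction the module performs at import time.
    client = SoftLayer.create_client_from_env()
    vs = VSManager(client)

    # The existence check create_virtual_instance() performs before building.
    for inst in vs.list_instances(hostname='instance-1', domain='anydomain.com'):
        print(inst['id'], inst['hostname'])
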
diff --git a/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
deleted file mode 100644
index da8f0102..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/spotinst/spotinst_aws_elastigroup.py
+++ /dev/null
@@ -1,1557 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-DOCUMENTATION = '''
----
-module: spotinst_aws_elastigroup
-short_description: Create, update or delete Spotinst AWS Elastigroups
-author: Spotinst (@talzur)
-description:
- - Can create, update, or delete Spotinst AWS Elastigroups.
- Launch configuration is part of the elastigroup configuration,
- so no additional modules are necessary for handling the launch configuration.
- You will have to have a credentials file in this location - ~/.spotinst/credentials
- The credentials file must contain a row that looks like this
- token =
- Full documentation available at https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-
-requirements:
- - python >= 2.7
- - spotinst_sdk >= 1.0.38
-options:
-
- credentials_path:
- description:
- - Optional parameter that allows setting a non-default credentials path.
- default: ~/.spotinst/credentials
- type: path
-
- account_id:
- description:
- - Optional parameter that allows setting an account ID inside the module configuration.
- By default this is retrieved from the credentials path.
- type: str
-
- availability_vs_cost:
- description:
- - The strategy orientation.
- - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
- required: true
- type: str
-
- availability_zones:
- description:
- - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are
- name (String),
- subnet_id (String),
- placement_group_name (String),
- required: true
- type: list
- elements: dict
-
- block_device_mappings:
- description:
- - A list of hash/dictionaries of Block Device Mappings for elastigroup instances;
- You can specify virtual devices and EBS volumes.;
- '[{"key":"value", "key":"value"}]';
- keys allowed are
- device_name (List of Strings),
- virtual_name (String),
- no_device (String),
- ebs (Object, expects the following keys-
- delete_on_termination(Boolean),
- encrypted(Boolean),
- iops (Integer),
- snapshot_id(Integer),
- volume_type(String),
- volume_size(Integer))
- type: list
- elements: dict
-
- chef:
- description:
- - The Chef integration configuration.;
- Expects the following keys - chef_server (String),
- organization (String),
- user (String),
- pem_key (String),
- chef_version (String)
- type: dict
-
- draining_timeout:
- description:
- - Time for instance to be drained from incoming requests and deregistered from ELB before termination.
- type: int
-
- ebs_optimized:
- description:
- - Enable EBS optimization for supported instances which are not enabled by default.;
- Note - additional charges will be applied.
- type: bool
-
- ebs_volume_pool:
- description:
- - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- volume_ids (List of Strings),
- device_name (String)
- type: list
- elements: dict
-
- ecs:
- description:
- - The ECS integration configuration.;
- Expects the following key -
- cluster_name (String)
- type: dict
-
- elastic_ips:
- description:
- - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate to the group instances
- type: list
- elements: str
-
- fallback_to_od:
- description:
- - In case of no spots available, Elastigroup will launch an On-demand instance instead
- type: bool
-
- health_check_grace_period:
- description:
- - The amount of time, in seconds, after the instance has launched to start and check its health.
- - If not specified, it defaults to C(300).
- type: int
-
- health_check_unhealthy_duration_before_replacement:
- description:
- - Minimal amount of time an instance should be unhealthy for us to consider it unhealthy.
- type: int
-
- health_check_type:
- description:
- - The service to use for the health check.
- - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
- type: str
-
- iam_role_name:
- description:
- - The instance profile iamRole name
- - Only use iam_role_arn, or iam_role_name
- type: str
-
- iam_role_arn:
- description:
- - The instance profile iamRole arn
- - Only use iam_role_arn, or iam_role_name
- type: str
-
- id:
- description:
- - The group id if it already exists and you want to update, or delete it.
- This will not work unless the uniqueness_by field is set to id.
- When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
- type: str
-
- image_id:
- description:
- - The image Id used to launch the instance.;
- In case of conflict between Instance type and image type, an error will be returned
- required: true
- type: str
-
- key_pair:
- description:
- - Specify a Key Pair to attach to the instances
- type: str
-
- kubernetes:
- description:
- - The Kubernetes integration configuration.
- Expects the following keys -
- api_server (String),
- token (String)
- type: dict
-
- lifetime_period:
- description:
- - Lifetime period
- type: int
-
- load_balancers:
- description:
- - List of classic ELB names
- type: list
- elements: str
-
- max_size:
- description:
- - The upper limit number of instances that you can scale up to
- required: true
- type: int
-
- mesosphere:
- description:
- - The Mesosphere integration configuration.
- Expects the following key -
- api_server (String)
- type: dict
-
- min_size:
- description:
- - The lower limit number of instances that you can scale down to
- required: true
- type: int
-
- monitoring:
- description:
- - Describes whether instance Enhanced Monitoring is enabled
- type: str
-
- name:
- description:
- - Unique name for elastigroup to be created, updated or deleted
- required: true
- type: str
-
- network_interfaces:
- description:
- - A list of hash/dictionaries of network interfaces to add to the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- description (String),
- device_index (Integer),
- secondary_private_ip_address_count (Integer),
- associate_public_ip_address (Boolean),
- delete_on_termination (Boolean),
- groups (List of Strings),
- network_interface_id (String),
- private_ip_address (String),
- subnet_id (String),
- associate_ipv6_address (Boolean),
- private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
- type: list
- elements: dict
-
- on_demand_count:
- description:
- - Required if risk is not set
- - Number of on demand instances to launch. All other instances will be spot instances.;
- Either set this parameter or the risk parameter
- type: int
-
- on_demand_instance_type:
- description:
- - On-demand instance type that will be provisioned
- type: str
-
- opsworks:
- description:
- - The elastigroup OpsWorks integration configuration.;
- Expects the following key -
- layer_id (String)
- type: dict
-
- persistence:
- description:
- - The Stateful elastigroup configuration.;
- Accepts the following keys -
- should_persist_root_device (Boolean),
- should_persist_block_devices (Boolean),
- should_persist_private_ip (Boolean)
- type: dict
-
- product:
- description:
- - Operation system type.
- - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
- required: true
- type: str
-
- rancher:
- description:
- - The Rancher integration configuration.;
- Expects the following keys -
- version (String),
- access_key (String),
- secret_key (String),
- master_host (String)
- type: dict
-
- right_scale:
- description:
- - The Rightscale integration configuration.;
- Expects the following keys -
- account_id (String),
- refresh_token (String)
- type: dict
-
- risk:
- description:
- - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
- type: int
-
- roll_config:
- description:
- - Roll configuration.;
- If you would like the group to roll after updating, please use this feature.
- Accepts the following keys -
- batch_size_percentage(Integer, Required),
- grace_period - (Integer, Required),
- health_check_type(String, Optional)
- type: dict
-
- scheduled_tasks:
- description:
- - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- adjustment (Integer),
- scale_target_capacity (Integer),
- scale_min_capacity (Integer),
- scale_max_capacity (Integer),
- adjustment_percentage (Integer),
- batch_size_percentage (Integer),
- cron_expression (String),
- frequency (String),
- grace_period (Integer),
- task_type (String, required),
- is_enabled (Boolean)
- type: list
- elements: dict
-
- security_group_ids:
- description:
- - One or more security group IDs.;
- In case of update it will override the existing Security Group with the new given array
- required: true
- type: list
- elements: str
-
- shutdown_script:
- description:
- - The Base64-encoded shutdown script that executes prior to instance termination.
- Encode before setting.
- type: str
-
- signals:
- description:
- - A list of hash/dictionaries of signals to configure in the elastigroup;
- keys allowed are -
- name (String, required),
- timeout (Integer)
- type: list
- elements: dict
-
- spin_up_time:
- description:
- - Spin up time, in seconds, for the instance
- type: int
-
- spot_instance_types:
- description:
- - Spot instance type that will be provisioned.
- required: true
- type: list
- elements: str
-
- state:
- choices:
- - present
- - absent
- description:
- - Create or delete the elastigroup
- default: present
- type: str
-
- tags:
- description:
- - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
- type: list
- elements: dict
-
- target:
- description:
- - The number of instances to launch
- required: true
- type: int
-
- target_group_arns:
- description:
- - List of target group arns instances should be registered to
- type: list
- elements: str
-
- tenancy:
- description:
- - Dedicated vs shared tenancy.
- - "The available choices are: C(default), C(dedicated)."
- type: str
-
- terminate_at_end_of_billing_hour:
- description:
- - Terminate at the end of billing hour
- type: bool
-
- unit:
- description:
- - The capacity unit to launch instances by.
- - "The available choices are: C(instance), C(weight)."
- type: str
-
- up_scaling_policies:
- description:
- - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- policy_name (String, required),
- namespace (String, required),
- metric_name (String, required),
- dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
- statistic (String, required)
- evaluation_periods (String, required),
- period (String, required),
- threshold (String, required),
- cooldown (String, required),
- unit (String, required),
- operator (String, required),
- action_type (String, required),
- adjustment (String),
- min_target_capacity (String),
- target (String),
- maximum (String),
- minimum (String)
- type: list
- elements: dict
-
- down_scaling_policies:
- description:
- - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- policy_name (String, required),
- namespace (String, required),
- metric_name (String, required),
- dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
- statistic (String, required),
- evaluation_periods (String, required),
- period (String, required),
- threshold (String, required),
- cooldown (String, required),
- unit (String, required),
- operator (String, required),
- action_type (String, required),
- adjustment (String),
- max_target_capacity (String),
- target (String),
- maximum (String),
- minimum (String)
- type: list
- elements: dict
-
- target_tracking_policies:
- description:
- - A list of hash/dictionaries of target tracking policies to configure in the elastigroup;
- '[{"key":"value", "key":"value"}]';
- keys allowed are -
- policy_name (String, required),
- namespace (String, required),
- source (String, required),
- metric_name (String, required),
- statistic (String, required),
- unit (String, required),
- cooldown (String, required),
- target (String, required)
- type: list
- elements: dict
-
- uniqueness_by:
- choices:
- - id
- - name
- description:
- - If your group names are not unique, you may use this feature to update or delete a specific group.
- Whenever this property is set, you must set a group_id in order to update or delete a group, otherwise a group will be created.
- default: name
- type: str
-
- user_data:
- description:
- - Base64-encoded MIME user data. Encode before setting the value.
- type: str
-
- utilize_reserved_instances:
- description:
- - In case of any available Reserved Instances,
- Elastigroup will utilize your reservations before purchasing Spot instances.
- type: bool
-
- wait_for_instances:
- description:
- - Whether or not the elastigroup creation / update actions should wait for the instances to spin up.
- type: bool
- default: false
-
- wait_timeout:
- description:
- - How long the module should wait for instances before failing the action.;
- Only works if wait_for_instances is True.
- type: int
-
-'''
-EXAMPLES = '''
-# Basic configuration YAML example
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- state: present
- risk: 100
- availability_vs_cost: balanced
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-2b68a15c
- image_id: ami-f173cc91
- key_pair: spotinst-oregon
- max_size: 15
- min_size: 0
- target: 0
- unit: instance
- monitoring: True
- name: ansible-group
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- load_balancers:
- - test-lb-1
- security_group_ids:
- - sg-8f4b8fe9
- spot_instance_types:
- - c3.large
- do_not_update:
- - image_id
- - target
- register: result
- - ansible.builtin.debug: var=result
-
-# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- state: present
- account_id: act-1a9dd2b
- risk: 100
- availability_vs_cost: balanced
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-2b68a15c
- tags:
- - Environment: someEnvValue
- - OtherTagKey: otherValue
- image_id: ami-f173cc91
- key_pair: spotinst-oregon
- max_size: 5
- min_size: 0
- target: 0
- unit: instance
- monitoring: True
- name: ansible-group-tal
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- security_group_ids:
- - sg-8f4b8fe9
- block_device_mappings:
- - device_name: '/dev/sda1'
- ebs:
- volume_size: 100
- volume_type: gp2
- spot_instance_types:
- - c3.large
- do_not_update:
- - image_id
- wait_for_instances: True
- wait_timeout: 600
- register: result
-
- - name: Store private ips to file
- ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
- with_items: "{{ result.instances }}"
- - ansible.builtin.debug: var=result
-
-# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
-# In organizations with more than one account, it is required to specify an account_id
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- state: present
- account_id: act-1a9dd2b
- risk: 100
- availability_vs_cost: balanced
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-2b68a15c
- tags:
- - Environment: someEnvValue
- - OtherTagKey: otherValue
- image_id: ami-f173cc91
- key_pair: spotinst-oregon
- max_size: 5
- min_size: 0
- target: 0
- unit: instance
- monitoring: True
- name: ansible-group-tal
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- security_group_ids:
- - sg-8f4b8fe9
- block_device_mappings:
- - device_name: '/dev/xvda'
- ebs:
- volume_size: 60
- volume_type: gp2
- - device_name: '/dev/xvdb'
- ebs:
- volume_size: 120
- volume_type: gp2
- spot_instance_types:
- - c3.large
- do_not_update:
- - image_id
- wait_for_instances: True
- wait_timeout: 600
- register: result
-
- - name: Store private ips to file
- ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
- with_items: "{{ result.instances }}"
- - ansible.builtin.debug: var=result
-
-# In this example we have set up block device mapping with ephemeral devices
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- state: present
- risk: 100
- availability_vs_cost: balanced
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-2b68a15c
- image_id: ami-f173cc91
- key_pair: spotinst-oregon
- max_size: 15
- min_size: 0
- target: 0
- unit: instance
- block_device_mappings:
- - device_name: '/dev/xvda'
- virtual_name: ephemeral0
- - device_name: '/dev/xvdb/'
- virtual_name: ephemeral1
- monitoring: True
- name: ansible-group
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- load_balancers:
- - test-lb-1
- security_group_ids:
- - sg-8f4b8fe9
- spot_instance_types:
- - c3.large
- do_not_update:
- - image_id
- - target
- register: result
- - ansible.builtin.debug: var=result
-
-# In this example we create a basic group configuration with a network interface defined.
-# Each network interface must have a device index
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- state: present
- risk: 100
- availability_vs_cost: balanced
- network_interfaces:
- - associate_public_ip_address: true
- device_index: 0
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-2b68a15c
- image_id: ami-f173cc91
- key_pair: spotinst-oregon
- max_size: 15
- min_size: 0
- target: 0
- unit: instance
- monitoring: True
- name: ansible-group
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- load_balancers:
- - test-lb-1
- security_group_ids:
- - sg-8f4b8fe9
- spot_instance_types:
- - c3.large
- do_not_update:
- - image_id
- - target
- register: result
- - ansible.builtin.debug: var=result
-
-
-# In this example we create a basic group configuration with a target tracking scaling policy defined
-
-- hosts: localhost
- tasks:
- - name: Create elastigroup
- community.general.spotinst_aws_elastigroup:
- account_id: act-92d45673
- state: present
- risk: 100
- availability_vs_cost: balanced
- availability_zones:
- - name: us-west-2a
- subnet_id: subnet-79da021e
- image_id: ami-f173cc91
- fallback_to_od: true
- tags:
- - Creator: ValueOfCreatorTag
- - Environment: ValueOfEnvironmentTag
- key_pair: spotinst-labs-oregon
- max_size: 10
- min_size: 0
- target: 2
- unit: instance
- monitoring: True
- name: ansible-group-1
- on_demand_instance_type: c3.large
- product: Linux/UNIX
- security_group_ids:
- - sg-46cdc13d
- spot_instance_types:
- - c3.large
- target_tracking_policies:
- - policy_name: target-tracking-1
- namespace: AWS/EC2
- metric_name: CPUUtilization
- statistic: average
- unit: percent
- target: 50
- cooldown: 120
- do_not_update:
- - image_id
- register: result
- - ansible.builtin.debug: var=result
-'''
-
-RETURN = '''
----
-instances:
- description: List of active elastigroup instances and their details.
- returned: success
- type: list
- sample: [
- {
- "spotInstanceRequestId": "sir-regs25zp",
- "instanceId": "i-09640ad8678234c",
- "instanceType": "m4.large",
- "product": "Linux/UNIX",
- "availabilityZone": "us-west-2b",
- "privateIp": "180.0.2.244",
- "createdAt": "2017-07-17T12:46:18.000Z",
- "status": "fulfilled"
- }
- ]
-group_id:
- description: Created / Updated group's ID.
- returned: success
- type: str
- sample: "sig-12345"
-
-'''
-
-HAS_SPOTINST_SDK = False
-__metaclass__ = type
-
-import os
-import time
-from ansible.module_utils.basic import AnsibleModule
-
-try:
- import spotinst_sdk as spotinst
- from spotinst_sdk import SpotinstClientException
-
- HAS_SPOTINST_SDK = True
-
-except ImportError:
- pass
-
-eni_fields = ('description',
- 'device_index',
- 'secondary_private_ip_address_count',
- 'associate_public_ip_address',
- 'delete_on_termination',
- 'groups',
- 'network_interface_id',
- 'private_ip_address',
- 'subnet_id',
- 'associate_ipv6_address')
-
-private_ip_fields = ('private_ip_address',
- 'primary')
-
-capacity_fields = (dict(ansible_field_name='min_size',
- spotinst_field_name='minimum'),
- dict(ansible_field_name='max_size',
- spotinst_field_name='maximum'),
- 'target',
- 'unit')
-
-lspec_fields = ('user_data',
- 'key_pair',
- 'tenancy',
- 'shutdown_script',
- 'monitoring',
- 'ebs_optimized',
- 'image_id',
- 'health_check_type',
- 'health_check_grace_period',
- 'health_check_unhealthy_duration_before_replacement',
- 'security_group_ids')
-
-iam_fields = (dict(ansible_field_name='iam_role_name',
- spotinst_field_name='name'),
- dict(ansible_field_name='iam_role_arn',
- spotinst_field_name='arn'))
-
-scheduled_task_fields = ('adjustment',
- 'adjustment_percentage',
- 'batch_size_percentage',
- 'cron_expression',
- 'frequency',
- 'grace_period',
- 'task_type',
- 'is_enabled',
- 'scale_target_capacity',
- 'scale_min_capacity',
- 'scale_max_capacity')
-
-scaling_policy_fields = ('policy_name',
- 'namespace',
- 'metric_name',
- 'dimensions',
- 'statistic',
- 'evaluation_periods',
- 'period',
- 'threshold',
- 'cooldown',
- 'unit',
- 'operator')
-
-tracking_policy_fields = ('policy_name',
- 'namespace',
- 'source',
- 'metric_name',
- 'statistic',
- 'unit',
- 'cooldown',
- 'target',
- 'threshold')
-
-action_fields = (dict(ansible_field_name='action_type',
- spotinst_field_name='type'),
- 'adjustment',
- 'min_target_capacity',
- 'max_target_capacity',
- 'target',
- 'minimum',
- 'maximum')
-
-signal_fields = ('name',
- 'timeout')
-
-multai_lb_fields = ('balancer_id',
- 'project_id',
- 'target_set_id',
- 'az_awareness',
- 'auto_weight')
-
-persistence_fields = ('should_persist_root_device',
- 'should_persist_block_devices',
- 'should_persist_private_ip')
-
-strategy_fields = ('risk',
- 'utilize_reserved_instances',
- 'fallback_to_od',
- 'on_demand_count',
- 'availability_vs_cost',
- 'draining_timeout',
- 'spin_up_time',
- 'lifetime_period')
-
-ebs_fields = ('delete_on_termination',
- 'encrypted',
- 'iops',
- 'snapshot_id',
- 'volume_type',
- 'volume_size')
-
-bdm_fields = ('device_name',
- 'virtual_name',
- 'no_device')
-
-kubernetes_fields = ('api_server',
- 'token')
-
-right_scale_fields = ('account_id',
- 'refresh_token')
-
-rancher_fields = ('access_key',
- 'secret_key',
- 'master_host',
- 'version')
-
-chef_fields = ('chef_server',
- 'organization',
- 'user',
- 'pem_key',
- 'chef_version')
-
-az_fields = ('name',
- 'subnet_id',
- 'placement_group_name')
-
-opsworks_fields = ('layer_id',)
-
-scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
-
-mesosphere_fields = ('api_server',)
-
-ecs_fields = ('cluster_name',)
-
-multai_fields = ('multai_token',)
-
-
-def handle_elastigroup(client, module):
- has_changed = False
- group_id = None
- message = 'None'
-
- name = module.params.get('name')
- state = module.params.get('state')
- uniqueness_by = module.params.get('uniqueness_by')
- external_group_id = module.params.get('id')
-
- if uniqueness_by == 'id':
- if external_group_id is None:
- should_create = True
- else:
- should_create = False
- group_id = external_group_id
- else:
- groups = client.get_elastigroups()
- should_create, group_id = find_group_with_same_name(groups, name)
-
- if should_create is True:
- if state == 'present':
- eg = expand_elastigroup(module, is_update=False)
- module.debug(str(" [INFO] " + message + "\n"))
- group = client.create_elastigroup(group=eg)
- group_id = group['id']
- message = 'Created group Successfully.'
- has_changed = True
-
- elif state == 'absent':
- message = 'Cannot delete non-existent group.'
- has_changed = False
- else:
- eg = expand_elastigroup(module, is_update=True)
-
- if state == 'present':
- group = client.update_elastigroup(group_update=eg, group_id=group_id)
- message = 'Updated group successfully.'
-
- try:
- roll_config = module.params.get('roll_config')
- if roll_config:
- eg_roll = spotinst.aws_elastigroup.Roll(
- batch_size_percentage=roll_config.get('batch_size_percentage'),
- grace_period=roll_config.get('grace_period'),
- health_check_type=roll_config.get('health_check_type')
- )
- roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
- message = 'Updated and started rolling the group successfully.'
-
- except SpotinstClientException as exc:
- message = 'Updated group successfully, but failed to perform roll. Error:' + str(exc)
- has_changed = True
-
- elif state == 'absent':
- try:
- client.delete_elastigroup(group_id=group_id)
- except SpotinstClientException as exc:
- if "GROUP_DOESNT_EXIST" in exc.message:
- pass
- else:
- module.fail_json(msg="Error while attempting to delete group : " + exc.message)
-
- message = 'Deleted group successfully.'
- has_changed = True
-
- return group_id, message, has_changed
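
The branching in handle_elastigroup() reduces to two questions: does the group already exist (resolved by id or by name, per uniqueness_by), and is the desired state present or absent. The decision table, as a standalone sketch:

    def decide_action(exists, state):
        # Mirrors handle_elastigroup()'s branching, minus the API calls.
        if not exists:
            return 'create' if state == 'present' else 'noop'  # nothing to delete
        return 'update (and optionally roll)' if state == 'present' else 'delete'

    for exists in (False, True):
        for state in ('present', 'absent'):
            print(exists, state, '->', decide_action(exists, state))
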
-
-
-def retrieve_group_instances(client, module, group_id):
- wait_timeout = module.params.get('wait_timeout')
- wait_for_instances = module.params.get('wait_for_instances')
-
- health_check_type = module.params.get('health_check_type')
-
- if wait_timeout is None:
- wait_timeout = 300
-
- wait_timeout = time.time() + wait_timeout
- target = module.params.get('target')
- state = module.params.get('state')
- instances = list()
-
- if state == 'present' and group_id is not None and wait_for_instances is True:
-
- is_amount_fulfilled = False
- while is_amount_fulfilled is False and wait_timeout > time.time():
- instances = list()
- amount_of_fulfilled_instances = 0
-
- if health_check_type is not None:
- healthy_instances = client.get_instance_healthiness(group_id=group_id)
-
- for healthy_instance in healthy_instances:
- if healthy_instance.get('healthStatus') == 'HEALTHY':
- amount_of_fulfilled_instances += 1
- instances.append(healthy_instance)
-
- else:
- active_instances = client.get_elastigroup_active_instances(group_id=group_id)
-
- for active_instance in active_instances:
- if active_instance.get('private_ip') is not None:
- amount_of_fulfilled_instances += 1
- instances.append(active_instance)
-
- if amount_of_fulfilled_instances >= target:
- is_amount_fulfilled = True
-
- time.sleep(10)
-
- return instances
-
-
-def find_group_with_same_name(groups, name):
- for group in groups:
- if group['name'] == name:
- return False, group.get('id')
-
- return True, None
-
-
-def expand_elastigroup(module, is_update):
- do_not_update = module.params['do_not_update']
- name = module.params.get('name')
-
- eg = spotinst.aws_elastigroup.Elastigroup()
- description = module.params.get('description')
-
- if name is not None:
- eg.name = name
- if description is not None:
- eg.description = description
-
- # Capacity
- expand_capacity(eg, module, is_update, do_not_update)
- # Strategy
- expand_strategy(eg, module)
- # Scaling
- expand_scaling(eg, module)
- # Third party integrations
- expand_integrations(eg, module)
- # Compute
- expand_compute(eg, module, is_update, do_not_update)
- # Multai
- expand_multai(eg, module)
- # Scheduling
- expand_scheduled_tasks(eg, module)
-
- return eg
-
-
-def expand_compute(eg, module, is_update, do_not_update):
- elastic_ips = module.params['elastic_ips']
- on_demand_instance_type = module.params.get('on_demand_instance_type')
- spot_instance_types = module.params['spot_instance_types']
- ebs_volume_pool = module.params['ebs_volume_pool']
- availability_zones_list = module.params['availability_zones']
- product = module.params.get('product')
-
- eg_compute = spotinst.aws_elastigroup.Compute()
-
- if product is not None:
- # Only put product on group creation
- if is_update is not True:
- eg_compute.product = product
-
- if elastic_ips is not None:
- eg_compute.elastic_ips = elastic_ips
-
- if on_demand_instance_type is not None or spot_instance_types is not None:
- eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
-
- if on_demand_instance_type is not None:
- eg_instance_types.ondemand = on_demand_instance_type
- if spot_instance_types is not None:
- eg_instance_types.spot = spot_instance_types
-
- if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
- eg_compute.instance_types = eg_instance_types
-
- expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
-
- eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
-
- expand_launch_spec(eg_compute, module, is_update, do_not_update)
-
- eg.compute = eg_compute
-
-
-def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
- if ebs_volumes_list is not None:
- eg_volumes = []
-
- for volume in ebs_volumes_list:
- eg_volume = spotinst.aws_elastigroup.EbsVolume()
-
- if volume.get('device_name') is not None:
- eg_volume.device_name = volume.get('device_name')
- if volume.get('volume_ids') is not None:
- eg_volume.volume_ids = volume.get('volume_ids')
-
- if eg_volume.device_name is not None:
- eg_volumes.append(eg_volume)
-
- if len(eg_volumes) > 0:
- eg_compute.ebs_volume_pool = eg_volumes
-
-
-def expand_launch_spec(eg_compute, module, is_update, do_not_update):
- eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
-
- if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
- eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
-
- tags = module.params['tags']
- load_balancers = module.params['load_balancers']
- target_group_arns = module.params['target_group_arns']
- block_device_mappings = module.params['block_device_mappings']
- network_interfaces = module.params['network_interfaces']
-
- if is_update is True:
- if 'image_id' in do_not_update:
- delattr(eg_launch_spec, 'image_id')
-
- expand_tags(eg_launch_spec, tags)
-
- expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
-
- expand_block_device_mappings(eg_launch_spec, block_device_mappings)
-
- expand_network_interfaces(eg_launch_spec, network_interfaces)
-
- eg_compute.launch_specification = eg_launch_spec
-
-
-def expand_integrations(eg, module):
- rancher = module.params.get('rancher')
- mesosphere = module.params.get('mesosphere')
- ecs = module.params.get('ecs')
- kubernetes = module.params.get('kubernetes')
- right_scale = module.params.get('right_scale')
- opsworks = module.params.get('opsworks')
- chef = module.params.get('chef')
-
- integration_exists = False
-
- eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
-
- if mesosphere is not None:
- eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
- integration_exists = True
-
- if ecs is not None:
- eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
- integration_exists = True
-
- if kubernetes is not None:
- eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
- integration_exists = True
-
- if right_scale is not None:
- eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
- integration_exists = True
-
- if opsworks is not None:
- eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
- integration_exists = True
-
- if rancher is not None:
- eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
- integration_exists = True
-
- if chef is not None:
- eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
- integration_exists = True
-
- if integration_exists:
- eg.third_parties_integration = eg_integrations
-
-
-def expand_capacity(eg, module, is_update, do_not_update):
- eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
-
- if is_update is True:
- delattr(eg_capacity, 'unit')
-
- if 'target' in do_not_update:
- delattr(eg_capacity, 'target')
-
- eg.capacity = eg_capacity
-
-
-def expand_strategy(eg, module):
- persistence = module.params.get('persistence')
- signals = module.params.get('signals')
-
- eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
-
- terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
-
- if terminate_at_end_of_billing_hour is not None:
- eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
- module.params, 'ScalingStrategy')
-
- if persistence is not None:
- eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
-
- if signals is not None:
- eg_signals = expand_list(signals, signal_fields, 'Signal')
-
- if len(eg_signals) > 0:
- eg_strategy.signals = eg_signals
-
- eg.strategy = eg_strategy
-
-
-def expand_multai(eg, module):
- multai_load_balancers = module.params.get('multai_load_balancers')
-
- eg_multai = expand_fields(multai_fields, module.params, 'Multai')
-
- if multai_load_balancers is not None:
- eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
-
- if len(eg_multai_load_balancers) > 0:
- eg_multai.balancers = eg_multai_load_balancers
- eg.multai = eg_multai
-
-
-def expand_scheduled_tasks(eg, module):
- scheduled_tasks = module.params.get('scheduled_tasks')
-
- if scheduled_tasks is not None:
- eg_scheduling = spotinst.aws_elastigroup.Scheduling()
-
- eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
-
- if len(eg_tasks) > 0:
- eg_scheduling.tasks = eg_tasks
- eg.scheduling = eg_scheduling
-
-
-def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
- if load_balancers is not None or target_group_arns is not None:
- eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
- eg_total_lbs = []
-
- if load_balancers is not None:
- for elb_name in load_balancers:
- eg_elb = spotinst.aws_elastigroup.LoadBalancer()
- if elb_name is not None:
- eg_elb.name = elb_name
- eg_elb.type = 'CLASSIC'
- eg_total_lbs.append(eg_elb)
-
- if target_group_arns is not None:
- for target_arn in target_group_arns:
- eg_elb = spotinst.aws_elastigroup.LoadBalancer()
- if target_arn is not None:
- eg_elb.arn = target_arn
- eg_elb.type = 'TARGET_GROUP'
- eg_total_lbs.append(eg_elb)
-
- if len(eg_total_lbs) > 0:
- eg_load_balancers_config.load_balancers = eg_total_lbs
- eg_launchspec.load_balancers_config = eg_load_balancers_config
-
-
-def expand_tags(eg_launchspec, tags):
- if tags is not None:
- eg_tags = []
-
- for tag in tags:
- eg_tag = spotinst.aws_elastigroup.Tag()
- if tag:
- eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0]
-
- eg_tags.append(eg_tag)
-
- if len(eg_tags) > 0:
- eg_launchspec.tags = eg_tags
-
-
-def expand_block_device_mappings(eg_launchspec, bdms):
- if bdms is not None:
- eg_bdms = []
-
- for bdm in bdms:
- eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
-
- if bdm.get('ebs') is not None:
- eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
-
- eg_bdms.append(eg_bdm)
-
- if len(eg_bdms) > 0:
- eg_launchspec.block_device_mappings = eg_bdms
-
-
-def expand_network_interfaces(eg_launchspec, enis):
- if enis is not None:
- eg_enis = []
-
- for eni in enis:
- eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
-
- eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
-
- if eg_pias is not None:
- eg_eni.private_ip_addresses = eg_pias
-
- eg_enis.append(eg_eni)
-
- if len(eg_enis) > 0:
- eg_launchspec.network_interfaces = eg_enis
-
-
-def expand_scaling(eg, module):
- up_scaling_policies = module.params['up_scaling_policies']
- down_scaling_policies = module.params['down_scaling_policies']
- target_tracking_policies = module.params['target_tracking_policies']
-
- eg_scaling = spotinst.aws_elastigroup.Scaling()
-
- if up_scaling_policies is not None:
- eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
- if len(eg_up_scaling_policies) > 0:
- eg_scaling.up = eg_up_scaling_policies
-
- if down_scaling_policies is not None:
- eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
- if len(eg_down_scaling_policies) > 0:
- eg_scaling.down = eg_down_scaling_policies
-
- if target_tracking_policies is not None:
- eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
- if len(eg_target_tracking_policies) > 0:
- eg_scaling.target = eg_target_tracking_policies
-
- if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
- eg.scaling = eg_scaling
-
-
-def expand_list(items, fields, class_name):
- if items is not None:
- new_objects_list = []
- for item in items:
- new_obj = expand_fields(fields, item, class_name)
- new_objects_list.append(new_obj)
-
- return new_objects_list
-
-
-def expand_fields(fields, item, class_name):
- class_ = getattr(spotinst.aws_elastigroup, class_name)
- new_obj = class_()
-
- # Handle primitive fields
- if item is not None:
- for field in fields:
- if isinstance(field, dict):
- ansible_field_name = field['ansible_field_name']
- spotinst_field_name = field['spotinst_field_name']
- else:
- ansible_field_name = field
- spotinst_field_name = field
- if item.get(ansible_field_name) is not None:
- setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
-
- return new_obj
-
-
-def expand_scaling_policies(scaling_policies):
- eg_scaling_policies = []
-
- for policy in scaling_policies:
- eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
- eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
- eg_scaling_policies.append(eg_policy)
-
- return eg_scaling_policies
-
-
-def expand_target_tracking_policies(tracking_policies):
- eg_tracking_policies = []
-
- for policy in tracking_policies:
- eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
- eg_tracking_policies.append(eg_policy)
-
- return eg_tracking_policies
-
-
-def main():
- fields = dict(
- account_id=dict(type='str'),
- availability_vs_cost=dict(type='str', required=True),
- availability_zones=dict(type='list', elements='dict', required=True),
- block_device_mappings=dict(type='list', elements='dict'),
- chef=dict(type='dict'),
- credentials_path=dict(type='path', default="~/.spotinst/credentials"),
- do_not_update=dict(default=[], type='list'),
- down_scaling_policies=dict(type='list', elements='dict'),
- draining_timeout=dict(type='int'),
- ebs_optimized=dict(type='bool'),
- ebs_volume_pool=dict(type='list', elements='dict'),
- ecs=dict(type='dict'),
- elastic_beanstalk=dict(type='dict'),
- elastic_ips=dict(type='list', elements='str'),
- fallback_to_od=dict(type='bool'),
- id=dict(type='str'),
- health_check_grace_period=dict(type='int'),
- health_check_type=dict(type='str'),
- health_check_unhealthy_duration_before_replacement=dict(type='int'),
- iam_role_arn=dict(type='str'),
- iam_role_name=dict(type='str'),
- image_id=dict(type='str', required=True),
- key_pair=dict(type='str', no_log=False),
- kubernetes=dict(type='dict'),
- lifetime_period=dict(type='int'),
- load_balancers=dict(type='list', elements='str'),
- max_size=dict(type='int', required=True),
- mesosphere=dict(type='dict'),
- min_size=dict(type='int', required=True),
- monitoring=dict(type='str'),
- multai_load_balancers=dict(type='list'),
- multai_token=dict(type='str', no_log=True),
- name=dict(type='str', required=True),
- network_interfaces=dict(type='list', elements='dict'),
- on_demand_count=dict(type='int'),
- on_demand_instance_type=dict(type='str'),
- opsworks=dict(type='dict'),
- persistence=dict(type='dict'),
- product=dict(type='str', required=True),
- rancher=dict(type='dict'),
- right_scale=dict(type='dict'),
- risk=dict(type='int'),
- roll_config=dict(type='dict'),
- scheduled_tasks=dict(type='list', elements='dict'),
- security_group_ids=dict(type='list', elements='str', required=True),
- shutdown_script=dict(type='str'),
- signals=dict(type='list', elements='dict'),
- spin_up_time=dict(type='int'),
- spot_instance_types=dict(type='list', elements='str', required=True),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(type='list', elements='dict'),
- target=dict(type='int', required=True),
- target_group_arns=dict(type='list', elements='str'),
- tenancy=dict(type='str'),
- terminate_at_end_of_billing_hour=dict(type='bool'),
- token=dict(type='str', no_log=True),
- unit=dict(type='str'),
- user_data=dict(type='str'),
- utilize_reserved_instances=dict(type='bool'),
- uniqueness_by=dict(default='name', choices=['name', 'id']),
- up_scaling_policies=dict(type='list', elements='dict'),
- target_tracking_policies=dict(type='list', elements='dict'),
- wait_for_instances=dict(type='bool', default=False),
- wait_timeout=dict(type='int')
- )
-
- module = AnsibleModule(argument_spec=fields)
-
- if not HAS_SPOTINST_SDK:
- module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
-
- # Retrieve creds file variables
- creds_file_loaded_vars = dict()
-
- credentials_path = module.params.get('credentials_path')
-
- try:
- with open(credentials_path, "r") as creds:
- for line in creds:
- eq_index = line.find('=')
- var_name = line[:eq_index].strip()
- string_value = line[eq_index + 1:].strip()
- creds_file_loaded_vars[var_name] = string_value
- except IOError:
- pass
- # End of creds file retrieval
-
- token = module.params.get('token')
- if not token:
- token = os.environ.get('SPOTINST_TOKEN')
- if not token:
- token = creds_file_loaded_vars.get("token")
-
- account = module.params.get('account_id')
- if not account:
- account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
- if not account:
- account = creds_file_loaded_vars.get("account")
-
- client = spotinst.SpotinstClient(auth_token=token, print_output=False)
-
- if account is not None:
- client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
-
- group_id, message, has_changed = handle_elastigroup(client=client, module=module)
-
- instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
-
- module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
-
-
-if __name__ == '__main__':
- main()
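
For reference, nearly all of the request building in this deleted Spotinst module funnels through expand_fields: a field spec is either a plain string (same name on both sides) or a dict mapping an Ansible parameter name onto a differently named SDK attribute. A minimal self-contained sketch of that convention, using a stand-in class and an illustrative snapshotId mapping rather than the real spotinst_sdk types:

class EBS(object):
    """Stand-in for a spotinst_sdk request object; attributes are set dynamically."""
    pass

# A field is either a string (same name on both sides) or an explicit mapping.
ebs_fields = [
    'encrypted',
    'iops',
    {'ansible_field_name': 'snapshot_id', 'spotinst_field_name': 'snapshotId'},  # illustrative
]

def expand(fields, params, cls):
    obj = cls()
    for field in fields:
        if isinstance(field, dict):
            src, dst = field['ansible_field_name'], field['spotinst_field_name']
        else:
            src = dst = field
        if params.get(src) is not None:  # only copy parameters the user actually set
            setattr(obj, dst, params[src])
    return obj

ebs = expand(ebs_fields, {'encrypted': True, 'snapshot_id': 'snap-123'}, EBS)
print(ebs.encrypted, ebs.snapshotId)  # -> True snap-123
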
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
deleted file mode 100644
index 4e7aa70b..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_record.py
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: udm_dns_record
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage DNS entries on a Univention Corporate Server
-description:
- - "This module allows to manage dns records on a univention corporate server (UCS).
- It uses the python API of the UCS to create a new object or edit it."
-requirements:
- - Python >= 2.6
- - Univention
- - ipaddress (for I(type=ptr_record))
-options:
- state:
- type: str
- default: "present"
- choices: [ present, absent ]
- description:
-      - Whether the DNS record is present or not.
- name:
- type: str
- required: true
- description:
- - "Name of the record, this is also the DNS record. E.g. www for
- www.example.com."
- - For PTR records this has to be the IP address.
- zone:
- type: str
- required: true
- description:
- - Corresponding DNS zone for this record, e.g. example.com.
- - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)).
- type:
- type: str
- required: true
- description:
- - "Define the record type. C(host_record) is a A or AAAA record,
- C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
- is a SRV record and C(txt_record) is a TXT record."
- - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
- data:
- type: dict
- default: {}
- description:
- - "Additional data for this record, e.g. ['a': '192.0.2.1'].
- Required if C(state=present)."
-'''
-
-
-EXAMPLES = '''
-- name: Create a DNS record on a UCS
- community.general.udm_dns_record:
- name: www
- zone: example.com
- type: host_record
- data:
- a:
- - 192.0.2.1
- - 2001:0db8::42
-
-- name: Create a DNS v4 PTR record on a UCS
- community.general.udm_dns_record:
- name: 192.0.2.1
- zone: 2.0.192.in-addr.arpa
- type: ptr_record
- data:
- ptr_record: "www.example.com."
-
-- name: Create a DNS v6 PTR record on a UCS
- community.general.udm_dns_record:
- name: 2001:db8:0:0:0:ff00:42:8329
- zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa
- type: ptr_record
- data:
- ptr_record: "www.example.com."
-'''
-
-
-RETURN = '''#'''
-
-HAVE_UNIVENTION = False
-HAVE_IPADDRESS = False
-try:
- from univention.admin.handlers.dns import (
- forward_zone,
- reverse_zone,
- )
- HAVE_UNIVENTION = True
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.basic import missing_required_lib
-from ansible_collections.community.general.plugins.module_utils.univention_umc import (
- umc_module_for_add,
- umc_module_for_edit,
- ldap_search,
- base_dn,
- config,
- uldap,
-)
-try:
- import ipaddress
- HAVE_IPADDRESS = True
-except ImportError:
- pass
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- type=dict(required=True,
- type='str'),
- zone=dict(required=True,
- type='str'),
- name=dict(required=True,
- type='str'),
- data=dict(default={},
- type='dict'),
- state=dict(default='present',
- choices=['present', 'absent'],
- type='str')
- ),
- supports_check_mode=True,
- required_if=([
- ('state', 'present', ['data'])
- ])
- )
-
- if not HAVE_UNIVENTION:
- module.fail_json(msg="This module requires univention python bindings")
-
- type = module.params['type']
- zone = module.params['zone']
- name = module.params['name']
- data = module.params['data']
- state = module.params['state']
- changed = False
- diff = None
-
- workname = name
- if type == 'ptr_record':
- if not HAVE_IPADDRESS:
- module.fail_json(msg=missing_required_lib('ipaddress'))
- try:
- if 'arpa' not in zone:
- raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)")
- ipaddr_rev = ipaddress.ip_address(name).reverse_pointer
- subnet_offset = ipaddr_rev.find(zone)
- if subnet_offset == -1:
- raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev))
- workname = ipaddr_rev[0:subnet_offset - 1]
- except Exception as e:
- module.fail_json(
- msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e)
- )
-
- obj = list(ldap_search(
- '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname),
- attr=['dNSZone']
- ))
- exists = bool(len(obj))
- container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
- dn = 'relativeDomainName={0},{1}'.format(workname, container)
-
- if state == 'present':
- try:
- if not exists:
- so = forward_zone.lookup(
- config(),
- uldap(),
- '(zone={0})'.format(zone),
- scope='domain',
- ) or reverse_zone.lookup(
- config(),
- uldap(),
- '(zoneName={0})'.format(zone),
- scope='domain',
- )
- if len(so) == 0:
- raise Exception("Did not find zone '{0}' in Univention".format(zone))
- obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
- else:
- obj = umc_module_for_edit('dns/{0}'.format(type), dn)
-
- if type == 'ptr_record':
- obj['ip'] = name
- obj['address'] = workname
- else:
- obj['name'] = name
-
- for k, v in data.items():
- obj[k] = v
- diff = obj.diff()
- changed = obj.diff() != []
- if not module.check_mode:
- if not exists:
- obj.create()
- else:
- obj.modify()
- except Exception as e:
- module.fail_json(
- msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
- )
-
- if state == 'absent' and exists:
- try:
- obj = umc_module_for_edit('dns/{0}'.format(type), dn)
- if not module.check_mode:
- obj.remove()
- changed = True
- except Exception as e:
- module.fail_json(
- msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
- )
-
- module.exit_json(
- changed=changed,
- name=name,
- diff=diff,
- container=container
- )
-
-
-if __name__ == '__main__':
- main()
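
For reference, the trickiest step in this deleted udm_dns_record module is deriving the relative record name for PTR entries. A self-contained sketch of that step with example data (the module additionally validates that the zone looks like a reverse zone):

import ipaddress

zone = '2.0.192.in-addr.arpa'
ipaddr_rev = ipaddress.ip_address('192.0.2.1').reverse_pointer  # '1.2.0.192.in-addr.arpa'
subnet_offset = ipaddr_rev.find(zone)
if subnet_offset == -1:
    raise ValueError('reversed IP address {0} is not part of zone'.format(ipaddr_rev))
workname = ipaddr_rev[0:subnet_offset - 1]  # strip '.2.0.192.in-addr.arpa'
print(workname)  # -> 1, the relativeDomainName inside the zone
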
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
deleted file mode 100644
index f1cea87e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_dns_zone.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: udm_dns_zone
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage DNS zones on a Univention Corporate Server
-description:
- - "This module allows to manage dns zones on a univention corporate server (UCS).
- It uses the python API of the UCS to create a new object or edit it."
-requirements:
- - Python >= 2.6
-options:
- state:
- type: str
- default: "present"
- choices: [ present, absent ]
- description:
-      - Whether the DNS zone is present or not.
- type:
- type: str
- required: true
- description:
- - Define if the zone is a forward or reverse DNS zone.
- - "The available choices are: C(forward_zone), C(reverse_zone)."
- zone:
- type: str
- required: true
- description:
- - DNS zone name, e.g. C(example.com).
- aliases: [name]
- nameserver:
- type: list
- elements: str
- description:
- - List of appropriate name servers. Required if C(state=present).
- interfaces:
- type: list
- elements: str
- description:
-      - List of interface IP addresses on which the server should
-        respond for this zone. Required if C(state=present).
-
- refresh:
- type: int
- default: 3600
- description:
- - Interval before the zone should be refreshed.
- retry:
- type: int
- default: 1800
- description:
- - Interval that should elapse before a failed refresh should be retried.
- expire:
- type: int
- default: 604800
- description:
- - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
- ttl:
- type: int
- default: 600
- description:
- - Minimum TTL field that should be exported with any RR from this zone.
-
- contact:
- type: str
- default: ''
- description:
- - Contact person in the SOA record.
- mx:
- type: list
- elements: str
- default: []
- description:
-      - List of MX servers. (Must be declared as A or AAAA records.)
-'''
-
-
-EXAMPLES = '''
-- name: Create a DNS zone on a UCS
- community.general.udm_dns_zone:
- zone: example.com
- type: forward_zone
- nameserver:
- - ucs.example.com
- interfaces:
- - 192.0.2.1
-'''
-
-
-RETURN = '''# '''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.univention_umc import (
- umc_module_for_add,
- umc_module_for_edit,
- ldap_search,
- base_dn,
-)
-
-
-def convert_time(time):
- """Convert a time in seconds into the biggest unit"""
- units = [
- (24 * 60 * 60, 'days'),
- (60 * 60, 'hours'),
- (60, 'minutes'),
- (1, 'seconds'),
- ]
-
- if time == 0:
- return ('0', 'seconds')
- for unit in units:
- if time >= unit[0]:
- return ('{0}'.format(time // unit[0]), unit[1])
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- type=dict(required=True,
- type='str'),
- zone=dict(required=True,
- aliases=['name'],
- type='str'),
- nameserver=dict(default=[],
- type='list',
- elements='str'),
- interfaces=dict(default=[],
- type='list',
- elements='str'),
- refresh=dict(default=3600,
- type='int'),
- retry=dict(default=1800,
- type='int'),
- expire=dict(default=604800,
- type='int'),
- ttl=dict(default=600,
- type='int'),
- contact=dict(default='',
- type='str'),
- mx=dict(default=[],
- type='list',
- elements='str'),
- state=dict(default='present',
- choices=['present', 'absent'],
- type='str')
- ),
- supports_check_mode=True,
- required_if=([
- ('state', 'present', ['nameserver', 'interfaces'])
- ])
- )
- type = module.params['type']
- zone = module.params['zone']
- nameserver = module.params['nameserver']
- interfaces = module.params['interfaces']
- refresh = module.params['refresh']
- retry = module.params['retry']
- expire = module.params['expire']
- ttl = module.params['ttl']
- contact = module.params['contact']
- mx = module.params['mx']
- state = module.params['state']
- changed = False
- diff = None
-
- obj = list(ldap_search(
- '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
- attr=['dNSZone']
- ))
-
- exists = bool(len(obj))
- container = 'cn=dns,{0}'.format(base_dn())
- dn = 'zoneName={0},{1}'.format(zone, container)
- if contact == '':
- contact = 'root@{0}.'.format(zone)
-
- if state == 'present':
- try:
- if not exists:
- obj = umc_module_for_add('dns/{0}'.format(type), container)
- else:
- obj = umc_module_for_edit('dns/{0}'.format(type), dn)
- obj['zone'] = zone
- obj['nameserver'] = nameserver
- obj['a'] = interfaces
- obj['refresh'] = convert_time(refresh)
- obj['retry'] = convert_time(retry)
- obj['expire'] = convert_time(expire)
- obj['ttl'] = convert_time(ttl)
- obj['contact'] = contact
- obj['mx'] = mx
- diff = obj.diff()
- if exists:
- for k in obj.keys():
- if obj.hasChanged(k):
- changed = True
- else:
- changed = True
- if not module.check_mode:
- if not exists:
- obj.create()
- elif changed:
- obj.modify()
- except Exception as e:
- module.fail_json(
- msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
- )
-
- if state == 'absent' and exists:
- try:
- obj = umc_module_for_edit('dns/{0}'.format(type), dn)
- if not module.check_mode:
- obj.remove()
- changed = True
- except Exception as e:
- module.fail_json(
- msg='Removing dns zone {0} failed: {1}'.format(zone, e)
- )
-
- module.exit_json(
- changed=changed,
- diff=diff,
- zone=zone
- )
-
-
-if __name__ == '__main__':
- main()
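
For reference, this deleted udm_dns_zone module hands the SOA timers to UDM as (value, unit) pairs, and convert_time picks the largest unit that fits, truncating any remainder. A standalone copy of the helper plus two example calls:

def convert_time(time):
    """Convert a time in seconds into the biggest unit."""
    units = [
        (24 * 60 * 60, 'days'),
        (60 * 60, 'hours'),
        (60, 'minutes'),
        (1, 'seconds'),
    ]
    if time == 0:
        return ('0', 'seconds')
    for unit in units:
        if time >= unit[0]:
            return ('{0}'.format(time // unit[0]), unit[1])

print(convert_time(604800))  # -> ('7', 'days'), the module's default expire value
print(convert_time(5400))    # -> ('1', 'hours'); the remaining 30 minutes are dropped
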
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
deleted file mode 100644
index d20187c6..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_group.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: udm_group
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage POSIX groups on a Univention Corporate Server
-description:
- - "This module allows to manage user groups on a univention corporate server (UCS).
- It uses the python API of the UCS to create a new object or edit it."
-requirements:
- - Python >= 2.6
-options:
- state:
- required: false
- default: "present"
- choices: [ present, absent ]
- description:
- - Whether the group is present or not.
- type: str
- name:
- required: true
- description:
- - Name of the posix group.
- type: str
- description:
- required: false
- description:
- - Group description.
- type: str
- position:
- required: false
- description:
-      - Define the whole LDAP position of the group, e.g.
- C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
- type: str
- ou:
- required: false
- description:
- - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
- type: str
- subpath:
- required: false
- description:
- - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
- type: str
- default: "cn=groups"
-'''
-
-
-EXAMPLES = '''
-- name: Create a POSIX group
- community.general.udm_group:
- name: g123m-1A
-
-# Create a POSIX group with the exact DN
-# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
-- name: Create a POSIX group with a DN
- community.general.udm_group:
- name: g123m-1A
- subpath: 'cn=classes,cn=students,cn=groups'
- ou: school
-
-# or
-- name: Create a POSIX group with a DN
- community.general.udm_group:
- name: g123m-1A
- position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
-'''
-
-
-RETURN = '''# '''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.univention_umc import (
- umc_module_for_add,
- umc_module_for_edit,
- ldap_search,
- base_dn,
-)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True,
- type='str'),
- description=dict(default=None,
- type='str'),
- position=dict(default='',
- type='str'),
- ou=dict(default='',
- type='str'),
- subpath=dict(default='cn=groups',
- type='str'),
- state=dict(default='present',
- choices=['present', 'absent'],
- type='str')
- ),
- supports_check_mode=True
- )
- name = module.params['name']
- description = module.params['description']
- position = module.params['position']
- ou = module.params['ou']
- subpath = module.params['subpath']
- state = module.params['state']
- changed = False
- diff = None
-
- groups = list(ldap_search(
- '(&(objectClass=posixGroup)(cn={0}))'.format(name),
- attr=['cn']
- ))
- if position != '':
- container = position
- else:
- if ou != '':
- ou = 'ou={0},'.format(ou)
- if subpath != '':
- subpath = '{0},'.format(subpath)
- container = '{0}{1}{2}'.format(subpath, ou, base_dn())
- group_dn = 'cn={0},{1}'.format(name, container)
-
- exists = bool(len(groups))
-
- if state == 'present':
- try:
- if not exists:
- grp = umc_module_for_add('groups/group', container)
- else:
- grp = umc_module_for_edit('groups/group', group_dn)
- grp['name'] = name
- grp['description'] = description
- diff = grp.diff()
- changed = grp.diff() != []
- if not module.check_mode:
- if not exists:
- grp.create()
- else:
- grp.modify()
- except Exception:
- module.fail_json(
- msg="Creating/editing group {0} in {1} failed".format(name, container)
- )
-
- if state == 'absent' and exists:
- try:
- grp = umc_module_for_edit('groups/group', group_dn)
- if not module.check_mode:
- grp.remove()
- changed = True
- except Exception:
- module.fail_json(
- msg="Removing group {0} failed".format(name)
- )
-
- module.exit_json(
- changed=changed,
- name=name,
- diff=diff,
- container=container
- )
-
-
-if __name__ == '__main__':
- main()
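
For reference, all of these deleted udm_* modules locate objects the same way: an explicit position wins, otherwise the container is assembled from subpath, ou and the LDAP base. A minimal sketch with the base DN passed in as a literal (in the real module it comes from the UCS base_dn helper):

def build_group_dn(name, base, position='', ou='', subpath='cn=groups'):
    if position != '':
        container = position
    else:
        ou_part = 'ou={0},'.format(ou) if ou != '' else ''
        sub_part = '{0},'.format(subpath) if subpath != '' else ''
        container = '{0}{1}{2}'.format(sub_part, ou_part, base)
    return 'cn={0},{1}'.format(name, container)

print(build_group_dn('g123m-1A', 'dc=example,dc=com', ou='school',
                     subpath='cn=classes,cn=students,cn=groups'))
# -> cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=example,dc=com
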
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
deleted file mode 100644
index fb86d836..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_share.py
+++ /dev/null
@@ -1,576 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: udm_share
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage Samba shares on a Univention Corporate Server
-description:
- - "This module allows to manage samba shares on a univention corporate
- server (UCS).
- It uses the python API of the UCS to create a new object or edit it."
-requirements:
- - Python >= 2.6
-options:
- state:
- default: "present"
- choices: [ present, absent ]
- description:
- - Whether the share is present or not.
- type: str
- name:
- required: true
- description:
-      - Name of the share.
- type: str
- host:
- required: false
- description:
- - Host FQDN (server which provides the share), e.g. C({{
- ansible_fqdn }}). Required if C(state=present).
- type: str
- path:
- required: false
- description:
- - Directory on the providing server, e.g. C(/home). Required if C(state=present).
- type: path
- sambaName:
- required: false
- description:
- - Windows name. Required if C(state=present).
- type: str
- aliases: [ samba_name ]
- ou:
- required: true
- description:
-      - Organisational unit inside the LDAP Base DN.
- type: str
- owner:
- default: '0'
- description:
- - Directory owner of the share's root directory.
- type: str
- group:
- default: '0'
- description:
- - Directory owner group of the share's root directory.
- type: str
- directorymode:
- default: '00755'
- description:
- - Permissions for the share's root directory.
- type: str
- root_squash:
- default: true
- description:
- - Modify user ID for root user (root squashing).
- type: bool
- subtree_checking:
- default: true
- description:
- - Subtree checking.
- type: bool
- sync:
- default: 'sync'
- description:
- - NFS synchronisation.
- type: str
- writeable:
- default: true
- description:
- - NFS write access.
- type: bool
- sambaBlockSize:
- description:
- - Blocking size.
- type: str
- aliases: [ samba_block_size ]
- sambaBlockingLocks:
- default: true
- description:
- - Blocking locks.
- type: bool
- aliases: [ samba_blocking_locks ]
- sambaBrowseable:
- description:
- - Show in Windows network environment.
- type: bool
- default: True
- aliases: [ samba_browsable ]
- sambaCreateMode:
- default: '0744'
- description:
- - File mode.
- type: str
- aliases: [ samba_create_mode ]
- sambaCscPolicy:
- default: 'manual'
- description:
- - Client-side caching policy.
- type: str
- aliases: [ samba_csc_policy ]
- sambaCustomSettings:
- default: []
- description:
- - Option name in smb.conf and its value.
- type: list
- aliases: [ samba_custom_settings ]
- sambaDirectoryMode:
- default: '0755'
- description:
- - Directory mode.
- type: str
- aliases: [ samba_directory_mode ]
- sambaDirectorySecurityMode:
- default: '0777'
- description:
- - Directory security mode.
- type: str
- aliases: [ samba_directory_security_mode ]
- sambaDosFilemode:
- default: false
- description:
- - Users with write access may modify permissions.
- type: bool
- aliases: [ samba_dos_filemode ]
- sambaFakeOplocks:
- default: false
- description:
- - Fake oplocks.
- type: bool
- aliases: [ samba_fake_oplocks ]
- sambaForceCreateMode:
- default: false
- description:
- - Force file mode.
- type: bool
- aliases: [ samba_force_create_mode ]
- sambaForceDirectoryMode:
- default: false
- description:
- - Force directory mode.
- type: bool
- aliases: [ samba_force_directory_mode ]
- sambaForceDirectorySecurityMode:
- default: false
- description:
- - Force directory security mode.
- type: bool
- aliases: [ samba_force_directory_security_mode ]
- sambaForceGroup:
- description:
- - Force group.
- type: str
- aliases: [ samba_force_group ]
- sambaForceSecurityMode:
- default: false
- description:
- - Force security mode.
- type: bool
- aliases: [ samba_force_security_mode ]
- sambaForceUser:
- description:
- - Force user.
- type: str
- aliases: [ samba_force_user ]
- sambaHideFiles:
- description:
- - Hide files.
- type: str
- aliases: [ samba_hide_files ]
- sambaHideUnreadable:
- default: false
- description:
- - Hide unreadable files/directories.
- type: bool
- aliases: [ samba_hide_unreadable ]
- sambaHostsAllow:
- default: []
- description:
- - Allowed host/network.
- type: list
- aliases: [ samba_hosts_allow ]
- sambaHostsDeny:
- default: []
- description:
- - Denied host/network.
- type: list
- aliases: [ samba_hosts_deny ]
- sambaInheritAcls:
- default: true
- description:
- - Inherit ACLs.
- type: bool
- aliases: [ samba_inherit_acls ]
- sambaInheritOwner:
- default: false
- description:
- - Create files/directories with the owner of the parent directory.
- type: bool
- aliases: [ samba_inherit_owner ]
- sambaInheritPermissions:
- default: false
- description:
- - Create files/directories with permissions of the parent directory.
- type: bool
- aliases: [ samba_inherit_permissions ]
- sambaInvalidUsers:
- description:
- - Invalid users or groups.
- type: str
- aliases: [ samba_invalid_users ]
- sambaLevel2Oplocks:
- default: true
- description:
- - Level 2 oplocks.
- type: bool
- aliases: [ samba_level_2_oplocks ]
- sambaLocking:
- default: true
- description:
- - Locking.
- type: bool
- aliases: [ samba_locking ]
- sambaMSDFSRoot:
- default: false
- description:
- - MSDFS root.
- type: bool
- aliases: [ samba_msdfs_root ]
- sambaNtAclSupport:
- default: true
- description:
- - NT ACL support.
- type: bool
- aliases: [ samba_nt_acl_support ]
- sambaOplocks:
- default: true
- description:
- - Oplocks.
- type: bool
- aliases: [ samba_oplocks ]
- sambaPostexec:
- description:
- - Postexec script.
- type: str
- aliases: [ samba_postexec ]
- sambaPreexec:
- description:
- - Preexec script.
- type: str
- aliases: [ samba_preexec ]
- sambaPublic:
- default: false
- description:
- - Allow anonymous read-only access with a guest user.
- type: bool
- aliases: [ samba_public ]
- sambaSecurityMode:
- default: '0777'
- description:
- - Security mode.
- type: str
- aliases: [ samba_security_mode ]
- sambaStrictLocking:
- default: 'Auto'
- description:
- - Strict locking.
- type: str
- aliases: [ samba_strict_locking ]
- sambaVFSObjects:
- description:
- - VFS objects.
- type: str
- aliases: [ samba_vfs_objects ]
- sambaValidUsers:
- description:
- - Valid users or groups.
- type: str
- aliases: [ samba_valid_users ]
- sambaWriteList:
- description:
- - Restrict write access to these users/groups.
- type: str
- aliases: [ samba_write_list ]
- sambaWriteable:
- default: true
- description:
- - Samba write access.
- type: bool
- aliases: [ samba_writeable ]
- nfs_hosts:
- default: []
- description:
- - Only allow access for this host, IP address or network.
- type: list
- nfsCustomSettings:
- default: []
- description:
-      - Option name in the exports file.
- type: list
- aliases: [ nfs_custom_settings ]
-'''
-
-
-EXAMPLES = '''
-- name: Create a share named home on the server ucs.example.com with the path /home
- community.general.udm_share:
- name: home
- path: /home
- host: ucs.example.com
- sambaName: Home
-'''
-
-
-RETURN = '''# '''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.univention_umc import (
- umc_module_for_add,
- umc_module_for_edit,
- ldap_search,
- base_dn,
-)
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True,
- type='str'),
- ou=dict(required=True,
- type='str'),
- owner=dict(type='str',
- default='0'),
- group=dict(type='str',
- default='0'),
- path=dict(type='path',
- default=None),
- directorymode=dict(type='str',
- default='00755'),
- host=dict(type='str',
- default=None),
- root_squash=dict(type='bool',
- default=True),
- subtree_checking=dict(type='bool',
- default=True),
- sync=dict(type='str',
- default='sync'),
- writeable=dict(type='bool',
- default=True),
- sambaBlockSize=dict(type='str',
- aliases=['samba_block_size'],
- default=None),
- sambaBlockingLocks=dict(type='bool',
- aliases=['samba_blocking_locks'],
- default=True),
- sambaBrowseable=dict(type='bool',
- aliases=['samba_browsable'],
- default=True),
- sambaCreateMode=dict(type='str',
- aliases=['samba_create_mode'],
- default='0744'),
- sambaCscPolicy=dict(type='str',
- aliases=['samba_csc_policy'],
- default='manual'),
- sambaCustomSettings=dict(type='list',
- aliases=['samba_custom_settings'],
- default=[]),
- sambaDirectoryMode=dict(type='str',
- aliases=['samba_directory_mode'],
- default='0755'),
- sambaDirectorySecurityMode=dict(type='str',
- aliases=['samba_directory_security_mode'],
- default='0777'),
- sambaDosFilemode=dict(type='bool',
- aliases=['samba_dos_filemode'],
- default=False),
- sambaFakeOplocks=dict(type='bool',
- aliases=['samba_fake_oplocks'],
- default=False),
- sambaForceCreateMode=dict(type='bool',
- aliases=['samba_force_create_mode'],
- default=False),
- sambaForceDirectoryMode=dict(type='bool',
- aliases=['samba_force_directory_mode'],
- default=False),
- sambaForceDirectorySecurityMode=dict(type='bool',
- aliases=['samba_force_directory_security_mode'],
- default=False),
- sambaForceGroup=dict(type='str',
- aliases=['samba_force_group'],
- default=None),
- sambaForceSecurityMode=dict(type='bool',
- aliases=['samba_force_security_mode'],
- default=False),
- sambaForceUser=dict(type='str',
- aliases=['samba_force_user'],
- default=None),
- sambaHideFiles=dict(type='str',
- aliases=['samba_hide_files'],
- default=None),
- sambaHideUnreadable=dict(type='bool',
- aliases=['samba_hide_unreadable'],
- default=False),
- sambaHostsAllow=dict(type='list',
- aliases=['samba_hosts_allow'],
- default=[]),
- sambaHostsDeny=dict(type='list',
- aliases=['samba_hosts_deny'],
- default=[]),
- sambaInheritAcls=dict(type='bool',
- aliases=['samba_inherit_acls'],
- default=True),
- sambaInheritOwner=dict(type='bool',
- aliases=['samba_inherit_owner'],
- default=False),
- sambaInheritPermissions=dict(type='bool',
- aliases=['samba_inherit_permissions'],
- default=False),
- sambaInvalidUsers=dict(type='str',
- aliases=['samba_invalid_users'],
- default=None),
- sambaLevel2Oplocks=dict(type='bool',
- aliases=['samba_level_2_oplocks'],
- default=True),
- sambaLocking=dict(type='bool',
- aliases=['samba_locking'],
- default=True),
- sambaMSDFSRoot=dict(type='bool',
- aliases=['samba_msdfs_root'],
- default=False),
- sambaName=dict(type='str',
- aliases=['samba_name'],
- default=None),
- sambaNtAclSupport=dict(type='bool',
- aliases=['samba_nt_acl_support'],
- default=True),
- sambaOplocks=dict(type='bool',
- aliases=['samba_oplocks'],
- default=True),
- sambaPostexec=dict(type='str',
- aliases=['samba_postexec'],
- default=None),
- sambaPreexec=dict(type='str',
- aliases=['samba_preexec'],
- default=None),
- sambaPublic=dict(type='bool',
- aliases=['samba_public'],
- default=False),
- sambaSecurityMode=dict(type='str',
- aliases=['samba_security_mode'],
- default='0777'),
- sambaStrictLocking=dict(type='str',
- aliases=['samba_strict_locking'],
- default='Auto'),
- sambaVFSObjects=dict(type='str',
- aliases=['samba_vfs_objects'],
- default=None),
- sambaValidUsers=dict(type='str',
- aliases=['samba_valid_users'],
- default=None),
- sambaWriteList=dict(type='str',
- aliases=['samba_write_list'],
- default=None),
- sambaWriteable=dict(type='bool',
- aliases=['samba_writeable'],
- default=True),
- nfs_hosts=dict(type='list',
- default=[]),
- nfsCustomSettings=dict(type='list',
- aliases=['nfs_custom_settings'],
- default=[]),
- state=dict(default='present',
- choices=['present', 'absent'],
- type='str')
- ),
- supports_check_mode=True,
- required_if=([
- ('state', 'present', ['path', 'host', 'sambaName'])
- ])
- )
- name = module.params['name']
- state = module.params['state']
- changed = False
- diff = None
-
- obj = list(ldap_search(
- '(&(objectClass=univentionShare)(cn={0}))'.format(name),
- attr=['cn']
- ))
-
- exists = bool(len(obj))
- container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
- dn = 'cn={0},{1}'.format(name, container)
-
- if state == 'present':
- try:
- if not exists:
- obj = umc_module_for_add('shares/share', container)
- else:
- obj = umc_module_for_edit('shares/share', dn)
-
- module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
- for k in obj.keys():
- if module.params[k] is True:
- module.params[k] = '1'
- elif module.params[k] is False:
- module.params[k] = '0'
- obj[k] = module.params[k]
-
- diff = obj.diff()
- if exists:
- for k in obj.keys():
- if obj.hasChanged(k):
- changed = True
- else:
- changed = True
- if not module.check_mode:
- if not exists:
- obj.create()
- elif changed:
- obj.modify()
- except Exception as err:
- module.fail_json(
- msg='Creating/editing share {0} in {1} failed: {2}'.format(
- name,
- container,
- err,
- )
- )
-
- if state == 'absent' and exists:
- try:
- obj = umc_module_for_edit('shares/share', dn)
- if not module.check_mode:
- obj.remove()
- changed = True
- except Exception as err:
- module.fail_json(
- msg='Removing share {0} in {1} failed: {2}'.format(
- name,
- container,
- err,
- )
- )
-
- module.exit_json(
- changed=changed,
- name=name,
- diff=diff,
- container=container
- )
-
-
-if __name__ == '__main__':
- main()
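
For reference, this deleted udm_share module is the one in the set that copies every Ansible parameter straight into the UMC object, so it first normalises booleans into the '1'/'0' strings UDM expects. The same normalisation as a standalone snippet with example data:

params = {'sambaBrowseable': True, 'sambaPublic': False, 'sambaCreateMode': '0744'}

normalised = {}
for k, v in params.items():
    if v is True:
        normalised[k] = '1'   # UDM expects string flags, not Python booleans
    elif v is False:
        normalised[k] = '0'
    else:
        normalised[k] = v     # everything else passes through unchanged

print(normalised)  # -> {'sambaBrowseable': '1', 'sambaPublic': '0', 'sambaCreateMode': '0744'}
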
diff --git a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py b/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
deleted file mode 100644
index b0d6138f..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/univention/udm_user.py
+++ /dev/null
@@ -1,542 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Adfinis SyGroup AG
-# Tobias Rueetschi
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: udm_user
-author:
-- Tobias Rüetschi (@keachi)
-short_description: Manage POSIX users on a Univention Corporate Server
-description:
- - "This module allows to manage posix users on a univention corporate
- server (UCS).
- It uses the python API of the UCS to create a new object or edit it."
-requirements:
- - Python >= 2.6
-options:
- state:
- default: "present"
- choices: [ present, absent ]
- description:
- - Whether the user is present or not.
- type: str
- username:
- required: true
- description:
- - User name
- aliases: ['name']
- type: str
- firstname:
- description:
- - First name. Required if C(state=present).
- type: str
- lastname:
- description:
- - Last name. Required if C(state=present).
- type: str
- password:
- description:
- - Password. Required if C(state=present).
- type: str
- birthday:
- description:
- - Birthday
- type: str
- city:
- description:
-      - City of the user's business address.
- type: str
- country:
- description:
-      - Country of the user's business address.
- type: str
- department_number:
- description:
-      - Department number of the user's business address.
- aliases: [ departmentNumber ]
- type: str
- description:
- description:
- - Description (not gecos)
- type: str
- display_name:
- description:
- - Display name (not gecos)
- aliases: [ displayName ]
- type: str
- email:
- default: ['']
- description:
- - A list of e-mail addresses.
- type: list
- employee_number:
- description:
- - Employee number
- aliases: [ employeeNumber ]
- type: str
- employee_type:
- description:
- - Employee type
- aliases: [ employeeType ]
- type: str
- gecos:
- description:
- - GECOS
- type: str
- groups:
- default: []
- description:
- - "POSIX groups, the LDAP DNs of the groups will be found with the
- LDAP filter for each group as $GROUP:
- C((&(objectClass=posixGroup)(cn=$GROUP)))."
- type: list
- home_share:
- description:
- - "Home NFS share. Must be a LDAP DN, e.g.
- C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
- aliases: [ homeShare ]
- type: str
- home_share_path:
- description:
- - Path to home NFS share, inside the homeShare.
- aliases: [ homeSharePath ]
- type: str
- home_telephone_number:
- default: []
- description:
- - List of private telephone numbers.
- aliases: [ homeTelephoneNumber ]
- type: list
- homedrive:
- description:
- - Windows home drive, e.g. C("H:").
- type: str
- mail_alternative_address:
- default: []
- description:
- - List of alternative e-mail addresses.
- aliases: [ mailAlternativeAddress ]
- type: list
- mail_home_server:
- description:
- - FQDN of mail server
- aliases: [ mailHomeServer ]
- type: str
- mail_primary_address:
- description:
- - Primary e-mail address
- aliases: [ mailPrimaryAddress ]
- type: str
- mobile_telephone_number:
- default: []
- description:
-      - List of mobile telephone numbers.
- aliases: [ mobileTelephoneNumber ]
- type: list
- organisation:
- description:
- - Organisation
- aliases: [ organization ]
- type: str
- overridePWHistory:
- type: bool
- default: 'no'
- description:
- - Override password history
- aliases: [ override_pw_history ]
- overridePWLength:
- type: bool
- default: 'no'
- description:
- - Override password check
- aliases: [ override_pw_length ]
- pager_telephonenumber:
- default: []
- description:
- - List of pager telephone numbers.
- aliases: [ pagerTelephonenumber ]
- type: list
- phone:
- description:
- - List of telephone numbers.
- type: list
- postcode:
- description:
-      - Postal code of the user's business address.
- type: str
- primary_group:
- description:
- - Primary group. This must be the group LDAP DN.
- - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
- aliases: [ primaryGroup ]
- type: str
- profilepath:
- description:
- - Windows profile directory
- type: str
- pwd_change_next_login:
- choices: [ '0', '1' ]
- description:
- - Change password on next login.
- aliases: [ pwdChangeNextLogin ]
- type: str
- room_number:
- description:
-      - Room number of the user's business address.
- aliases: [ roomNumber ]
- type: str
- samba_privileges:
- description:
- - "Samba privilege, like allow printer administration, do domain
- join."
- aliases: [ sambaPrivileges ]
- type: list
- samba_user_workstations:
- description:
-      - Allow authentication only on these Microsoft Windows hosts.
- aliases: [ sambaUserWorkstations ]
- type: list
- sambahome:
- description:
- - Windows home path, e.g. C('\\$FQDN\$USERNAME').
- type: str
- scriptpath:
- description:
- - Windows logon script.
- type: str
- secretary:
- default: []
- description:
- - A list of superiors as LDAP DNs.
- type: list
- serviceprovider:
- default: ['']
- description:
- - Enable user for the following service providers.
- type: list
- shell:
- default: '/bin/bash'
- description:
- - Login shell
- type: str
- street:
- description:
-      - Street of the user's business address.
- type: str
- title:
- description:
- - Title, e.g. C(Prof.).
- type: str
- unixhome:
- description:
- - Unix home directory
- - If not specified, it defaults to C(/home/$USERNAME).
- type: str
- userexpiry:
- description:
- - Account expiry date, e.g. C(1999-12-31).
- - If not specified, it defaults to the current day plus one year.
- type: str
- position:
- default: ''
- description:
- - "Define the whole position of users object inside the LDAP tree,
- e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
- type: str
- update_password:
- default: always
- choices: [ always, on_create ]
- description:
- - "C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users."
- type: str
- ou:
- default: ''
- description:
- - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
- LDAP OU C(ou=school,dc=example,dc=com)."
- type: str
- subpath:
- default: 'cn=users'
- description:
- - "LDAP subpath inside the organizational unit, e.g.
- C(cn=teachers,cn=users) for LDAP container
- C(cn=teachers,cn=users,dc=example,dc=com)."
- type: str
-'''
-
-
-EXAMPLES = '''
-- name: Create a user on a UCS
- community.general.udm_user:
- name: FooBar
- password: secure_password
- firstname: Foo
- lastname: Bar
-
-- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
- community.general.udm_user:
- name: foo
- password: secure_password
- firstname: Foo
- lastname: Bar
- ou: school
- subpath: 'cn=teachers,cn=users'
-
-# or define the position
-- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
- community.general.udm_user:
- name: foo
- password: secure_password
- firstname: Foo
- lastname: Bar
- position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
-'''
-
-
-RETURN = '''# '''
-
-import crypt
-from datetime import date, timedelta
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.univention_umc import (
- umc_module_for_add,
- umc_module_for_edit,
- ldap_search,
- base_dn,
-)
-
-
-def main():
- expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
- module = AnsibleModule(
- argument_spec=dict(
- birthday=dict(type='str'),
- city=dict(type='str'),
- country=dict(type='str'),
- department_number=dict(type='str',
- aliases=['departmentNumber']),
- description=dict(type='str'),
- display_name=dict(type='str',
- aliases=['displayName']),
- email=dict(default=[''],
- type='list'),
- employee_number=dict(type='str',
- aliases=['employeeNumber']),
- employee_type=dict(type='str',
- aliases=['employeeType']),
- firstname=dict(type='str'),
- gecos=dict(type='str'),
- groups=dict(default=[],
- type='list'),
- home_share=dict(type='str',
- aliases=['homeShare']),
- home_share_path=dict(type='str',
- aliases=['homeSharePath']),
- home_telephone_number=dict(default=[],
- type='list',
- aliases=['homeTelephoneNumber']),
- homedrive=dict(type='str'),
- lastname=dict(type='str'),
- mail_alternative_address=dict(default=[],
- type='list',
- aliases=['mailAlternativeAddress']),
- mail_home_server=dict(type='str',
- aliases=['mailHomeServer']),
- mail_primary_address=dict(type='str',
- aliases=['mailPrimaryAddress']),
- mobile_telephone_number=dict(default=[],
- type='list',
- aliases=['mobileTelephoneNumber']),
- organisation=dict(type='str',
- aliases=['organization']),
- overridePWHistory=dict(default=False,
- type='bool',
- aliases=['override_pw_history']),
- overridePWLength=dict(default=False,
- type='bool',
- aliases=['override_pw_length']),
- pager_telephonenumber=dict(default=[],
- type='list',
- aliases=['pagerTelephonenumber']),
- password=dict(type='str',
- no_log=True),
- phone=dict(default=[],
- type='list'),
- postcode=dict(type='str'),
- primary_group=dict(type='str',
- aliases=['primaryGroup']),
- profilepath=dict(type='str'),
- pwd_change_next_login=dict(type='str',
- choices=['0', '1'],
- aliases=['pwdChangeNextLogin']),
- room_number=dict(type='str',
- aliases=['roomNumber']),
- samba_privileges=dict(default=[],
- type='list',
- aliases=['sambaPrivileges']),
- samba_user_workstations=dict(default=[],
- type='list',
- aliases=['sambaUserWorkstations']),
- sambahome=dict(type='str'),
- scriptpath=dict(type='str'),
- secretary=dict(default=[],
- type='list'),
- serviceprovider=dict(default=[''],
- type='list'),
- shell=dict(default='/bin/bash',
- type='str'),
- street=dict(type='str'),
- title=dict(type='str'),
- unixhome=dict(type='str'),
- userexpiry=dict(type='str'),
- username=dict(required=True,
- aliases=['name'],
- type='str'),
- position=dict(default='',
- type='str'),
- update_password=dict(default='always',
- choices=['always', 'on_create'],
- type='str'),
- ou=dict(default='',
- type='str'),
- subpath=dict(default='cn=users',
- type='str'),
- state=dict(default='present',
- choices=['present', 'absent'],
- type='str')
- ),
- supports_check_mode=True,
- required_if=([
- ('state', 'present', ['firstname', 'lastname', 'password'])
- ])
- )
- username = module.params['username']
- position = module.params['position']
- ou = module.params['ou']
- subpath = module.params['subpath']
- state = module.params['state']
- changed = False
- diff = None
-
- users = list(ldap_search(
- '(&(objectClass=posixAccount)(uid={0}))'.format(username),
- attr=['uid']
- ))
- if position != '':
- container = position
- else:
- if ou != '':
- ou = 'ou={0},'.format(ou)
- if subpath != '':
- subpath = '{0},'.format(subpath)
- container = '{0}{1}{2}'.format(subpath, ou, base_dn())
- user_dn = 'uid={0},{1}'.format(username, container)
-
- exists = bool(len(users))
-
- if state == 'present':
- try:
- if not exists:
- obj = umc_module_for_add('users/user', container)
- else:
- obj = umc_module_for_edit('users/user', user_dn)
-
- if module.params['displayName'] is None:
- module.params['displayName'] = '{0} {1}'.format(
- module.params['firstname'],
- module.params['lastname']
- )
- if module.params['unixhome'] is None:
- module.params['unixhome'] = '/home/{0}'.format(
- module.params['username']
- )
- for k in obj.keys():
- if (k != 'password' and
- k != 'groups' and
- k != 'overridePWHistory' and
- k in module.params and
- module.params[k] is not None):
- obj[k] = module.params[k]
- # handle some special values
- obj['e-mail'] = module.params['email']
- if 'userexpiry' in obj and obj.get('userexpiry') is None:
- obj['userexpiry'] = expiry
- password = module.params['password']
- if obj['password'] is None:
- obj['password'] = password
- if module.params['update_password'] == 'always':
- old_password = obj['password'].split('}', 2)[1]
- if crypt.crypt(password, old_password) != old_password:
- obj['overridePWHistory'] = module.params['overridePWHistory']
- obj['overridePWLength'] = module.params['overridePWLength']
- obj['password'] = password
-
- diff = obj.diff()
- if exists:
- for k in obj.keys():
- if obj.hasChanged(k):
- changed = True
- else:
- changed = True
- if not module.check_mode:
- if not exists:
- obj.create()
- elif changed:
- obj.modify()
- except Exception:
- module.fail_json(
- msg="Creating/editing user {0} in {1} failed".format(
- username,
- container
- )
- )
- try:
- groups = module.params['groups']
- if groups:
- filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
- ')(cn='.join(groups)
- )
- group_dns = list(ldap_search(filter, attr=['dn']))
- for dn in group_dns:
- grp = umc_module_for_edit('groups/group', dn[0])
- if user_dn not in grp['users']:
- grp['users'].append(user_dn)
- if not module.check_mode:
- grp.modify()
- changed = True
- except Exception:
- module.fail_json(
- msg="Adding groups to user {0} failed".format(username)
- )
-
- if state == 'absent' and exists:
- try:
- obj = umc_module_for_edit('users/user', user_dn)
- if not module.check_mode:
- obj.remove()
- changed = True
- except Exception:
- module.fail_json(
- msg="Removing user {0} failed".format(username)
- )
-
- module.exit_json(
- changed=changed,
- username=username,
- diff=diff,
- container=container
- )
-
-
-if __name__ == '__main__':
- main()
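
For reference, with update_password=always this deleted udm_user module avoids needless writes by re-hashing the candidate password with the stored hash as salt; only a mismatch triggers an update. A self-contained sketch (the stored value is generated on the fly here; on a UCS it comes back as '{crypt}<hash>'):

import crypt  # Unix-only, as in the original module

stored = '{crypt}' + crypt.crypt('secure_password', crypt.mksalt(crypt.METHOD_SHA512))
old_hash = stored.split('}', 2)[1]  # drop the '{crypt}' prefix

# Hashing the candidate with the stored hash as salt reproduces it iff they match.
changed = crypt.crypt('secure_password', old_hash) != old_hash
print(changed)  # -> False: same password, so no modify() is needed
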
diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
deleted file mode 100644
index 1839db38..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_app.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
-# * Andy Baker
-# * Federico Tarantini
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Create a Webfaction application using Ansible and the Webfaction API
-#
-# Valid application types can be found by looking here:
-# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: webfaction_app
-short_description: Add or remove applications on a Webfaction host
-description:
- - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
-  - See U(https://docs.webfaction.com/xmlrpc-api/) for more info.
-
-options:
- name:
- description:
- - The name of the application
- required: true
- type: str
-
- state:
- description:
- - Whether the application should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- type:
- description:
- - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
- required: true
- type: str
-
- autostart:
- description:
- - Whether the app should restart with an C(autostart.cgi) script
- type: bool
- default: 'no'
-
- extra_info:
- description:
- - Any extra parameters required by the app
- default: ''
- type: str
-
- port_open:
- description:
-      - If the port should be opened.
- type: bool
- default: 'no'
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-
- machine:
- description:
- - The machine name to use (optional for accounts with only one machine)
- type: str
-
-'''
-
-EXAMPLES = '''
- - name: Create a test app
- community.general.webfaction_app:
- name: "my_wsgi_app1"
- state: present
- type: mod_wsgi35-python27
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
- machine: "{{webfaction_machine}}"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- type=dict(required=True),
- autostart=dict(required=False, type='bool', default=False),
- extra_info=dict(required=False, default=""),
- port_open=dict(required=False, type='bool', default=False),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- machine=dict(required=False, default=None),
- ),
- supports_check_mode=True
- )
- app_name = module.params['name']
- app_type = module.params['type']
- app_state = module.params['state']
-
- if module.params['machine']:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password'],
- module.params['machine']
- )
- else:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- app_list = webfaction.list_apps(session_id)
- app_map = dict([(i['name'], i) for i in app_list])
- existing_app = app_map.get(app_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if app_state == 'present':
-
- # Does an app with this name already exist?
- if existing_app:
- if existing_app['type'] != app_type:
- module.fail_json(msg="App already exists with different type. Please fix by hand.")
-
-            # If it exists with the right type, we leave it unchanged.
-            # Ideally we would also compare the remaining parameters here.
- module.exit_json(
- changed=False,
- result=existing_app,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, create the app
- result.update(
- webfaction.create_app(
- session_id, app_name, app_type,
- module.boolean(module.params['autostart']),
- module.params['extra_info'],
- module.boolean(module.params['port_open'])
- )
- )
-
- elif app_state == 'absent':
-
- # If the app's already not there, nothing changed.
- if not existing_app:
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, delete the app
- result.update(
- webfaction.delete_app(session_id, app_name)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(app_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
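
For reference, every Webfaction module in this set shares the same XML-RPC shape: one login call yields a session id that is threaded through all later calls. A condensed sketch using the stdlib client directly instead of the six.moves wrapper the module imports (credentials are placeholders, and the calls obviously need a reachable Webfaction account):

import xmlrpc.client

webfaction = xmlrpc.client.ServerProxy('https://api.webfaction.com/')

# One login yields the session id used by all subsequent calls.
session_id, account = webfaction.login('login_name', 'login_password')

app_map = {app['name']: app for app in webfaction.list_apps(session_id)}
if 'my_wsgi_app1' not in app_map:
    # name, type, autostart, extra_info, port_open -- as in the deleted module
    webfaction.create_app(session_id, 'my_wsgi_app1', 'mod_wsgi35-python27',
                          False, '', False)
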
diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
deleted file mode 100644
index 11563426..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_db.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
-# * Andy Baker
-# * Federico Tarantini
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Create a webfaction database using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: webfaction_db
-short_description: Add or remove a database on Webfaction
-description:
- - Add or remove a database on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
-  - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
-options:
-
- name:
- description:
- - The name of the database
- required: true
- type: str
-
- state:
- description:
- - Whether the database should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- type:
- description:
- - The type of database to create.
- required: true
- choices: ['mysql', 'postgresql']
- type: str
-
- password:
- description:
- - The password for the new database user.
- type: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-
- machine:
- description:
- - The machine name to use (optional for accounts with only one machine)
- type: str
-'''
-
-EXAMPLES = '''
- # This will also create a default DB user with the same
- # name as the database, and the specified password.
-
- - name: Create a database
- community.general.webfaction_db:
- name: "{{webfaction_user}}_db1"
- password: mytestsql
- type: mysql
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
- machine: "{{webfaction_machine}}"
-
- # Note that, for symmetry's sake, deleting a database using
- # 'state: absent' will also delete the matching user.
-
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
-            # The database engine to create: mysql or postgresql.
- type=dict(required=True, choices=['mysql', 'postgresql']),
- password=dict(required=False, default=None, no_log=True),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- machine=dict(required=False, default=None),
- ),
- supports_check_mode=True
- )
- db_name = module.params['name']
- db_state = module.params['state']
- db_type = module.params['type']
- db_passwd = module.params['password']
-
- if module.params['machine']:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password'],
- module.params['machine']
- )
- else:
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- db_list = webfaction.list_dbs(session_id)
- db_map = dict([(i['name'], i) for i in db_list])
- existing_db = db_map.get(db_name)
-
- user_list = webfaction.list_db_users(session_id)
- user_map = dict([(i['username'], i) for i in user_list])
- existing_user = user_map.get(db_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if db_state == 'present':
-
- # Does a database with this name already exist?
- if existing_db:
- # Yes, but of a different type - fail
- if existing_db['db_type'] != db_type:
- module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
-
- # If it exists with the right type, we don't change anything.
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, create the db
- # and default user.
- result.update(
- webfaction.create_db(
- session_id, db_name, db_type, db_passwd
- )
- )
-
-    elif db_state == 'absent':
-
-        # If neither the database nor its default user exists, nothing changed.
-        # This is checked before the dry-run guard so check mode reports it too.
-        if not (existing_db or existing_user):
-            module.exit_json(changed=False)
-
-        # If this isn't a dry run...
-        if not module.check_mode:
-
- if existing_db:
- # Delete the db if it exists
- result.update(
- webfaction.delete_db(session_id, db_name, db_type)
- )
-
- if existing_user:
- # Delete the default db user if it exists
- result.update(
- webfaction.delete_db_user(session_id, db_name, db_type)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(db_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
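
Before touching anything, webfaction_db (like its siblings) turns a list_* call into a name-keyed map and looks the target up, so present/absent becomes a simple membership test. A self-contained sketch of that idempotency check, with the record fields assumed from the code above:

# Sketch of the list -> map -> lookup idempotency check used above.
def find_existing(records, key, wanted):
    """Index API list results by a key field and return one record or None."""
    record_map = {record[key]: record for record in records}
    return record_map.get(wanted)

# Example shaped like a webfaction.list_dbs() result (fields assumed):
dbs = [{"name": "myuser_db1", "db_type": "mysql"}]
existing = find_existing(dbs, "name", "myuser_db1")
assert existing is not None and existing["db_type"] == "mysql"
assert find_existing(dbs, "name", "missing_db") is None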
diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
deleted file mode 100644
index f9c3b7db..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_domain.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Quentin Stafford-Fraser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Create Webfaction domains and subdomains using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: webfaction_domain
-short_description: Add or remove domains and subdomains on Webfaction
-description:
- - Add or remove domains or subdomains on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
- If you don't specify subdomains, the domain will be deleted.
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
-  - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
-
-options:
-
- name:
- description:
- - The name of the domain
- required: true
- type: str
-
- state:
- description:
- - Whether the domain should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- subdomains:
- description:
- - Any subdomains to create.
- default: []
- type: list
- elements: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create a test domain
- community.general.webfaction_domain:
- name: mydomain.com
- state: present
- subdomains:
- - www
- - blog
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-
- - name: Delete test domain and any subdomains
- community.general.webfaction_domain:
- name: mydomain.com
- state: absent
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- subdomains=dict(default=[], type='list', elements='str'),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
- domain_name = module.params['name']
- domain_state = module.params['state']
- domain_subdomains = module.params['subdomains']
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- domain_list = webfaction.list_domains(session_id)
- domain_map = dict([(i['domain'], i) for i in domain_list])
- existing_domain = domain_map.get(domain_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if domain_state == 'present':
-
-    # Does a domain with this name already exist?
- if existing_domain:
-
- if set(existing_domain['subdomains']) >= set(domain_subdomains):
- # If it exists with the right subdomains, we don't change anything.
- module.exit_json(
- changed=False,
- )
-
- positional_args = [session_id, domain_name] + domain_subdomains
-
- if not module.check_mode:
-            # If this isn't a dry run, create the domain
- result.update(
- webfaction.create_domain(
- *positional_args
- )
- )
-
- elif domain_state == 'absent':
-
-        # If the domain's already not there, nothing changed.
- if not existing_domain:
- module.exit_json(
- changed=False,
- )
-
- positional_args = [session_id, domain_name] + domain_subdomains
-
- if not module.check_mode:
-            # If this isn't a dry run, delete the domain
- result.update(
- webfaction.delete_domain(*positional_args)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
deleted file mode 100644
index 37755763..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_mailbox.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Quentin Stafford-Fraser and Andy Baker
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Create webfaction mailbox using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: webfaction_mailbox
-short_description: Add or remove mailboxes on Webfaction
-description:
- - Add or remove mailboxes on a Webfaction account. Further documentation at https://github.com/quentinsf/ansible-webfaction.
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
-  - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
-options:
-
- mailbox_name:
- description:
- - The name of the mailbox
- required: true
- type: str
-
- mailbox_password:
- description:
- - The password for the mailbox
- required: true
- type: str
-
- state:
- description:
- - Whether the mailbox should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create a mailbox
- community.general.webfaction_mailbox:
- mailbox_name="mybox"
- mailbox_password="myboxpw"
- state=present
- login_name={{webfaction_user}}
- login_password={{webfaction_passwd}}
-'''
-
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- mailbox_name=dict(required=True),
- mailbox_password=dict(required=True, no_log=True),
- state=dict(required=False, choices=['present', 'absent'], default='present'),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
-
- mailbox_name = module.params['mailbox_name']
- site_state = module.params['state']
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
- existing_mailbox = mailbox_name in mailbox_list
-
- result = {}
-
- # Here's where the real stuff happens
-
- if site_state == 'present':
-
- # Does a mailbox with this name already exist?
- if existing_mailbox:
- module.exit_json(changed=False,)
-
- positional_args = [session_id, mailbox_name]
-
- if not module.check_mode:
- # If this isn't a dry run, create the mailbox
- result.update(webfaction.create_mailbox(*positional_args))
-
- elif site_state == 'absent':
-
- # If the mailbox is already not there, nothing changed.
- if not existing_mailbox:
- module.exit_json(changed=False)
-
- if not module.check_mode:
- # If this isn't a dry run, delete the mailbox
- result.update(webfaction.delete_mailbox(session_id, mailbox_name))
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(site_state))
-
- module.exit_json(changed=True, result=result)
-
-
-if __name__ == '__main__':
- main()
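
All of these modules follow the same check-mode discipline: exit changed=False as soon as the desired state already holds, call the API only on a real run, and report changed=True otherwise. A stubbed sketch of that skeleton (the _StubModule class is a hypothetical stand-in for illustration, not Ansible's API):

# Generic sketch of the check-mode discipline shared by these modules.
class _StubModule:
    """Hypothetical stand-in for AnsibleModule, for illustration only."""
    def __init__(self, check_mode=False):
        self.check_mode = check_mode

    def exit_json(self, **kwargs):
        raise SystemExit(kwargs)

def ensure_present(module, exists, create):
    if exists:  # desired state already holds, even in check mode
        module.exit_json(changed=False)
    result = {}
    if not module.check_mode:  # only touch the API on a real run
        result.update(create())
    module.exit_json(changed=True, result=result)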
diff --git a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py b/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
deleted file mode 100644
index 87faade3..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/webfaction/webfaction_site.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Quentin Stafford-Fraser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# Create Webfaction website using Ansible and the Webfaction API
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: webfaction_site
-short_description: Add or remove a website on a Webfaction host
-description:
- - Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
-author: Quentin Stafford-Fraser (@quentinsf)
-notes:
- - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
- address. You can use a DNS name.
- - If a site of the same name exists in the account but on a different host, the operation will exit.
- - >
- You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
- The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
- your host, you may want to add C(serial: 1) to the plays.
-  - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
-
-options:
-
- name:
- description:
- - The name of the website
- required: true
- type: str
-
- state:
- description:
- - Whether the website should exist
- choices: ['present', 'absent']
- default: "present"
- type: str
-
- host:
- description:
- - The webfaction host on which the site should be created.
- required: true
- type: str
-
- https:
- description:
- - Whether or not to use HTTPS
- type: bool
- default: 'no'
-
- site_apps:
- description:
- - A mapping of URLs to apps
- default: []
- type: list
- elements: list
-
- subdomains:
- description:
- - A list of subdomains associated with this site.
- default: []
- type: list
- elements: str
-
- login_name:
- description:
- - The webfaction account to use
- required: true
- type: str
-
- login_password:
- description:
- - The webfaction password to use
- required: true
- type: str
-'''
-
-EXAMPLES = '''
- - name: Create website
- community.general.webfaction_site:
- name: testsite1
- state: present
- host: myhost.webfaction.com
- subdomains:
- - 'testsite1.my_domain.org'
- site_apps:
- - ['testapp1', '/']
- https: no
- login_name: "{{webfaction_user}}"
- login_password: "{{webfaction_passwd}}"
-'''
-
-import socket
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves import xmlrpc_client
-
-
-webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True),
- state=dict(choices=['present', 'absent'], default='present'),
- # You can specify an IP address or hostname.
- host=dict(required=True),
- https=dict(required=False, type='bool', default=False),
- subdomains=dict(type='list', elements='str', default=[]),
- site_apps=dict(type='list', elements='list', default=[]),
- login_name=dict(required=True),
- login_password=dict(required=True, no_log=True),
- ),
- supports_check_mode=True
- )
- site_name = module.params['name']
- site_state = module.params['state']
- site_host = module.params['host']
- site_ip = socket.gethostbyname(site_host)
-
- session_id, account = webfaction.login(
- module.params['login_name'],
- module.params['login_password']
- )
-
- site_list = webfaction.list_websites(session_id)
- site_map = dict([(i['name'], i) for i in site_list])
- existing_site = site_map.get(site_name)
-
- result = {}
-
- # Here's where the real stuff happens
-
- if site_state == 'present':
-
- # Does a site with this name already exist?
- if existing_site:
-
- # If yes, but it's on a different IP address, then fail.
- # If we wanted to allow relocation, we could add a 'relocate=true' option
- # which would get the existing IP address, delete the site there, and create it
- # at the new address. A bit dangerous, perhaps, so for now we'll require manual
- # deletion if it's on another host.
-
- if existing_site['ip'] != site_ip:
- module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
-
- # If it's on this host and the key parameters are the same, nothing needs to be done.
-
- if (existing_site['https'] == module.boolean(module.params['https'])) and \
- (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
- (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
- module.exit_json(
- changed=False
- )
-
- positional_args = [
- session_id, site_name, site_ip,
- module.boolean(module.params['https']),
- module.params['subdomains'],
- ]
- for a in module.params['site_apps']:
- positional_args.append((a[0], a[1]))
-
- if not module.check_mode:
- # If this isn't a dry run, create or modify the site
- result.update(
- webfaction.create_website(
- *positional_args
- ) if not existing_site else webfaction.update_website(
- *positional_args
- )
- )
-
- elif site_state == 'absent':
-
- # If the site's already not there, nothing changed.
- if not existing_site:
- module.exit_json(
- changed=False,
- )
-
- if not module.check_mode:
- # If this isn't a dry run, delete the site
- result.update(
- webfaction.delete_website(session_id, site_name, site_ip)
- )
-
- else:
- module.fail_json(msg="Unknown state specified: {0}".format(site_state))
-
- module.exit_json(
- changed=True,
- result=result
- )
-
-
-if __name__ == '__main__':
- main()
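
Two details above distinguish webfaction_site from the other modules: the API keys sites by IP address, so the friendlier host name is resolved first, and site_apps pairs are handed to the API as tuples. A minimal standalone illustration (placeholder host name):

# Host resolution and app-pair mapping as done by webfaction_site above.
import socket

def site_call_args(host, site_apps):
    site_ip = socket.gethostbyname(host)              # DNS name -> IPv4
    app_tuples = [(app, path) for app, path in site_apps]
    return site_ip, app_tuples

# e.g. site_call_args("myhost.webfaction.com", [["testapp1", "/"]])
#   -> ("203.0.113.10", [("testapp1", "/")])   (IP depends on DNS)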
diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
deleted file mode 100644
index b90b380c..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest.py
+++ /dev/null
@@ -1,2026 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2018, Bojan Vitnik
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: xenserver_guest
-short_description: Manages virtual machines running on Citrix Hypervisor/XenServer host or pool
-description: >
- This module can be used to create new virtual machines from templates or other virtual machines,
- modify various virtual machine components like network and disk, rename a virtual machine and
- remove a virtual machine with associated components.
-author:
-- Bojan Vitnik (@bvitnik)
-notes:
-- Minimal supported version of XenServer is 5.6.
-- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
-- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
-  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
-  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
- U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
-- 'If no scheme is specified in I(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
-  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
-- 'To use the C(https://) scheme for I(hostname) you have to either import the host certificate into your OS certificate store or use I(validate_certs): C(no)
-  which requires the XenAPI library from the XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
-- 'Network configuration inside a guest OS, by using I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on
-  XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
-  detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since the XenServer Guest
-  agent only supports None and Static types of network configuration, where None means a DHCP configured interface, the I(networks.type) and I(networks.type6)
-  values C(none) and C(dhcp) have the same effect. More info here:
-  U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
-- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to the xenstore
-  C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using the C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
-  WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
-  to implement a boot-time script or custom agent that will read the parameters from xenstore and configure the network with the given parameters.
-  Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
-  parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
-  useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
-  U(https://support.citrix.com/article/CTX226713)'
-requirements:
-- python >= 2.6
-- XenAPI
-options:
- state:
- description:
- - Specify the state VM should be in.
- - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
- - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
- - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
- - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
- type: str
- default: present
- choices: [ present, absent, poweredon ]
- name:
- description:
- - Name of the VM to work with.
-    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
-    - In case of multiple VMs with the same name, use I(uuid) to uniquely specify the VM to manage.
- - This parameter is case sensitive.
- type: str
- aliases: [ name_label ]
- name_desc:
- description:
- - VM description.
- type: str
- uuid:
- description:
- - UUID of the VM to manage if known. This is XenServer's unique identifier.
- - It is required if name is not unique.
- - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
- type: str
- template:
- description:
- - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
-    - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
-    - In case of multiple templates/VMs/snapshots with the same name, use I(template_uuid) to uniquely specify the source template.
- - If VM already exists, this setting will be ignored.
- - This parameter is case sensitive.
- type: str
- aliases: [ template_src ]
- template_uuid:
- description:
- - UUID of a template, an existing VM or a snapshot that should be used to create VM.
- - It is required if template name is not unique.
- type: str
- is_template:
- description:
- - Convert VM to template.
- type: bool
- default: no
- folder:
- description:
- - Destination folder for VM.
- - This parameter is case sensitive.
- - 'Example:'
- - ' folder: /folder1/folder2'
- type: str
- hardware:
- description:
- - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
- type: dict
- suboptions:
- num_cpus:
- description:
- - Number of CPUs.
- type: int
- num_cpu_cores_per_socket:
- description:
- - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket).
- type: int
- memory_mb:
- description:
- - Amount of memory in MB.
- type: int
- disks:
- description:
- - A list of disks to add to VM.
- - All parameters are case sensitive.
- - Removing or detaching existing disks of VM is not supported.
- - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified.
- - VM needs to be shut down to reconfigure disk size.
- type: list
- elements: dict
- aliases: [ disk ]
- suboptions:
- size:
- description:
- - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.'
- - If no unit is specified, size is assumed to be in bytes.
- type: str
- size_b:
- description:
- - Disk size in bytes.
- type: str
- size_kb:
- description:
- - Disk size in kilobytes.
- type: str
- size_mb:
- description:
- - Disk size in megabytes.
- type: str
- size_gb:
- description:
- - Disk size in gigabytes.
- type: str
- size_tb:
- description:
- - Disk size in terabytes.
- type: str
- name:
- description:
- - Disk name.
- type: str
- aliases: [ name_label ]
- name_desc:
- description:
- - Disk description.
- type: str
- sr:
- description:
- - Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.
- type: str
- sr_uuid:
- description:
- - UUID of a SR to create disk on. Use if SR name is not unique.
- type: str
- cdrom:
- description:
- - A CD-ROM configuration for the VM.
- - All parameters are case sensitive.
- type: dict
- suboptions:
- type:
- description:
- - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty.
- type: str
- choices: [ none, iso ]
- iso_name:
- description:
- - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).'
- - Required if I(type) is set to C(iso).
- type: str
- networks:
- description:
- - A list of networks (in the order of the NICs).
- - All parameters are case sensitive.
- - Name is required for new NICs. Other parameters are optional in all cases.
- type: list
- elements: dict
- aliases: [ network ]
- suboptions:
- name:
- description:
- - Name of a XenServer network to attach the network interface to.
- type: str
- aliases: [ name_label ]
- mac:
- description:
- - Customize MAC address of the interface.
- type: str
- type:
- description:
- - Type of IPv4 assignment. Value C(none) means whatever is default for OS.
- - On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).
- type: str
- choices: [ none, dhcp, static ]
- ip:
- description:
-        - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(<IP address>/<prefix>) instead of using C(netmask).'
- type: str
- netmask:
- description:
- - Static IPv4 netmask required for I(ip) if prefix is not specified.
- type: str
- gateway:
- description:
- - Static IPv4 gateway.
- type: str
- type6:
- description:
- - Type of IPv6 assignment. Value C(none) means whatever is default for OS.
- type: str
- choices: [ none, dhcp, static ]
- ip6:
- description:
-        - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(<IPv6 address>/<prefix>).'
- type: str
- gateway6:
- description:
- - Static IPv6 gateway.
- type: str
- home_server:
- description:
- - Name of a XenServer host that will be a Home Server for the VM.
- - This parameter is case sensitive.
- type: str
- custom_params:
- description:
- - Define a list of custom VM params to set on VM.
-    - Useful for advanced users familiar with managing VM params through the xe CLI.
- - A custom value object takes two fields I(key) and I(value) (see example below).
- type: list
- elements: dict
- suboptions:
- key:
- description:
- - VM param name.
- type: str
- required: yes
- value:
- description:
- - VM param value.
- type: raw
- required: yes
- wait_for_ip_address:
- description:
- - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored.
- - This requires XenServer Tools to be preinstalled on the VM to work properly.
- type: bool
- default: no
- state_change_timeout:
- description:
-    - 'By default, the module will wait indefinitely for the VM to acquire an IP address if I(wait_for_ip_address): C(yes).'
- - If this parameter is set to positive value, the module will instead wait specified number of seconds for the state change.
- - In case of timeout, module will generate an error message.
- type: int
- default: 0
- linked_clone:
- description:
- - Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy.
- - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
- type: bool
- default: no
- force:
- description:
- - Ignore warnings and complete the actions.
- - This parameter is useful for removing VM in running state or reconfiguring VM params that require VM to be shut down.
- type: bool
- default: no
-extends_documentation_fragment:
-- community.general.xenserver.documentation
-
-'''
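
The notes above leave the in-guest half of the custom xenstore mechanism to the user: something at boot has to read the vm-data/networks/<vif_device>/* keys and apply them. A minimal sketch of such a reader for *nix guests, shelling out to the xenstore read tool the notes mention (here via the xenstore-read binary; a hypothetical helper, with the actual network-configuration step left out):

# Hypothetical boot-time reader for the xenstore keys this module writes.
import subprocess

FIELDS = ["name", "mac", "type", "ip", "prefix", "netmask", "gateway",
          "type6", "ip6", "prefix6", "gateway6"]

def read_vif_config(vif_device):
    """Return {field: value} for one VIF, skipping absent keys."""
    config = {}
    for field in FIELDS:
        key = "vm-data/networks/%s/%s" % (vif_device, field)
        try:
            out = subprocess.run(["xenstore-read", key], capture_output=True,
                                 text=True, check=True)
            config[field] = out.stdout.strip()
        except (OSError, subprocess.CalledProcessError):
            pass  # key absent or tool unavailable; leave the field unset
    return config

# e.g. read_vif_config("0") -> {"type": "static", "ip": "192.168.1.100", ...}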
-
-EXAMPLES = r'''
-- name: Create a VM from a template
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- validate_certs: no
- folder: /testvms
- name: testvm_2
- state: poweredon
- template: CentOS 7
- disks:
- - size_gb: 10
- sr: my_sr
- hardware:
- num_cpus: 6
- num_cpu_cores_per_socket: 3
- memory_mb: 512
- cdrom:
- type: iso
- iso_name: guest-tools.iso
- networks:
- - name: VM Network
- mac: aa:bb:dd:aa:00:14
- wait_for_ip_address: yes
- delegate_to: localhost
- register: deploy
-
-- name: Create a VM template
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- validate_certs: no
- folder: /testvms
- name: testvm_6
- is_template: yes
- disk:
- - size_gb: 10
- sr: my_sr
- hardware:
- memory_mb: 512
- num_cpus: 1
- delegate_to: localhost
- register: deploy
-
-- name: Rename a VM (requires the VM's UUID)
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- uuid: 421e4592-c069-924d-ce20-7e7533fab926
- name: new_name
- state: present
- delegate_to: localhost
-
-- name: Remove a VM by UUID
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- uuid: 421e4592-c069-924d-ce20-7e7533fab926
- state: absent
- delegate_to: localhost
-
-- name: Modify custom params (boot order)
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- name: testvm_8
- state: present
- custom_params:
- - key: HVM_boot_params
- value: { "order": "ndc" }
- delegate_to: localhost
-
-- name: Customize network parameters
- community.general.xenserver_guest:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- name: testvm_10
- networks:
- - name: VM Network
- ip: 192.168.1.100/24
- gateway: 192.168.1.1
- - type: dhcp
- delegate_to: localhost
-'''
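
The networks examples above take either an ip with a /prefix or a separate netmask; the conversion between the two forms is handled by the ip_prefix_to_netmask / ip_netmask_to_prefix helpers imported from module_utils further down. A standalone stdlib equivalent, for illustration only:

# Stdlib equivalent of the prefix <-> netmask conversion (illustrative).
import ipaddress

def prefix_to_netmask(prefix):
    return str(ipaddress.ip_network("0.0.0.0/%d" % prefix).netmask)

def netmask_to_prefix(netmask):
    return ipaddress.ip_network("0.0.0.0/%s" % netmask).prefixlen

assert prefix_to_netmask(24) == "255.255.255.0"
assert netmask_to_prefix("255.255.255.128") == 25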
-
-RETURN = r'''
-instance:
- description: Metadata about the VM
- returned: always
- type: dict
- sample: {
- "cdrom": {
- "type": "none"
- },
- "customization_agent": "native",
- "disks": [
- {
- "name": "testvm_11-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
- },
- {
- "name": "testvm_11-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
- }
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "testvm_11",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
- }
- }
-changes:
- description: Detected or made changes to VM
- returned: always
- type: list
- sample: [
- {
- "hardware": [
- "num_cpus"
- ]
- },
- {
- "disks_changed": [
- [],
- [
- "size"
- ]
- ]
- },
- {
- "disks_new": [
- {
- "name": "new-disk",
- "name_desc": "",
- "position": 2,
- "size_gb": "4",
- "vbd_userdevice": "2"
- }
- ]
- },
- {
- "cdrom": [
- "type",
- "iso_name"
- ]
- },
- {
- "networks_changed": [
- [
- "mac"
- ],
- ]
- },
- {
- "networks_new": [
- {
- "name": "Pool-wide network associated with eth2",
- "position": 1,
- "vif_device": "1"
- }
- ]
- },
- "need_poweredoff"
- ]
-'''
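
Note the shape of the changes sample above: it mixes single-key dicts with a bare string like need_poweredoff. That is exactly the structure get_changes() builds below, and the reason reconfigure() iterates it with isinstance() checks. A tiny walker showing why both branches exist:

# Why reconfigure() branches on isinstance(): the change list mixes
# bare string flags and single-key dicts, as in the sample above.
changes = [{"hardware": ["num_cpus"]}, "need_poweredoff"]

for change in changes:
    if isinstance(change, str):
        print("flag:", change)            # e.g. need_poweredoff
    elif isinstance(change, dict):
        for key, detail in change.items():
            print("group:", key, detail)  # e.g. hardware ['num_cpus']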
-
-import re
-
-HAS_XENAPI = False
-try:
- import XenAPI
- HAS_XENAPI = True
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.network import is_mac
-from ansible.module_utils import six
-from ansible_collections.community.general.plugins.module_utils.xenserver import (
- xenserver_common_argument_spec, XenServerObject, get_object_ref,
- gather_vm_params, gather_vm_facts, set_vm_power_state,
- wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
- is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
- is_valid_ip6_addr, is_valid_ip6_prefix)
-
-
-class XenServerVM(XenServerObject):
- """Class for managing XenServer VM.
-
- Attributes:
- vm_ref (str): XAPI reference to VM.
- vm_params (dict): A dictionary with VM parameters as returned
- by gather_vm_params() function.
- """
-
- def __init__(self, module):
- """Inits XenServerVM using module parameters.
-
- Args:
- module: Reference to Ansible module object.
- """
- super(XenServerVM, self).__init__(module)
-
- self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
- self.gather_params()
-
- def exists(self):
- """Returns True if VM exists, else False."""
-        return self.vm_ref is not None
-
- def gather_params(self):
- """Gathers all VM parameters available in XAPI database."""
- self.vm_params = gather_vm_params(self.module, self.vm_ref)
-
- def gather_facts(self):
- """Gathers and returns VM facts."""
- return gather_vm_facts(self.module, self.vm_params)
-
- def set_power_state(self, power_state):
- """Controls VM power state."""
- state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
-
- # If state has changed, update vm_params.
- if state_changed:
- self.vm_params['power_state'] = current_state.capitalize()
-
- return state_changed
-
- def wait_for_ip_address(self):
- """Waits for VM to acquire an IP address."""
- self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
-
- def deploy(self):
- """Deploys new VM from template."""
- # Safety check.
- if self.exists():
- self.module.fail_json(msg="Called deploy on existing VM!")
-
- try:
- templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
- msg_prefix="VM deploy: ")
-
- # Is this an existing running VM?
- if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
- self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
-
- # Find a SR we can use for VM.copy(). We use SR of the first disk
- # if specified or default SR if not specified.
- disk_params_list = self.module.params['disks']
-
- sr_ref = None
-
- if disk_params_list:
- disk_params = disk_params_list[0]
-
- disk_sr_uuid = disk_params.get('sr_uuid')
- disk_sr = disk_params.get('sr')
-
- if disk_sr_uuid is not None or disk_sr is not None:
- sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
- msg_prefix="VM deploy disks[0]: ")
-
- if not sr_ref:
- if self.default_sr_ref != "OpaqueRef:NULL":
- sr_ref = self.default_sr_ref
- else:
- self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
-
- # VM name could be an empty string which is bad.
- if self.module.params['name'] is not None and not self.module.params['name']:
- self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
-
- # Support for Ansible check mode.
- if self.module.check_mode:
- return
-
- # Now we can instantiate VM. We use VM.clone for linked_clone and
- # VM.copy for non linked_clone.
- if self.module.params['linked_clone']:
- self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
- else:
- self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
-
- # Description is copied over from template so we reset it.
- self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
-
- # If template is one of built-in XenServer templates, we have to
- # do some additional steps.
- # Note: VM.get_is_default_template() is supported from XenServer 7.2
- # onward so we use an alternative way.
- templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
-
- if "default_template" in templ_other_config and templ_other_config['default_template']:
-                # other_config of built-in XenServer templates has a key called
-                # 'disks' whose value is an XML <provision> snippet describing
-                # the template's default disks. This value of other_config is
-                # copied to the cloned or copied VM and it prevents provisioning
-                # of the VM because sr is not specified and
- # XAPI returns an error. To get around this, we remove the
- # 'disks' key and add disks to VM later ourselves.
- vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
-
- if "disks" in vm_other_config:
- del vm_other_config['disks']
-
- self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
-
- # At this point we have VM ready for provisioning.
- self.xapi_session.xenapi.VM.provision(self.vm_ref)
-
- # After provisioning we can prepare vm_params for reconfigure().
- self.gather_params()
-
- # VM is almost ready. We just need to reconfigure it...
- self.reconfigure()
-
- # Power on VM if needed.
- if self.module.params['state'] == "poweredon":
- self.set_power_state("poweredon")
-
- except XenAPI.Failure as f:
- self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- def reconfigure(self):
- """Reconfigures an existing VM.
-
- Returns:
- list: parameters that were reconfigured.
- """
- # Safety check.
- if not self.exists():
- self.module.fail_json(msg="Called reconfigure on non existing VM!")
-
- config_changes = self.get_changes()
-
- vm_power_state_save = self.vm_params['power_state'].lower()
-
- if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
- self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
-
- # Support for Ansible check mode.
- if self.module.check_mode:
- return config_changes
-
- if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
- self.set_power_state("shutdownguest")
-
- try:
- for change in config_changes:
- if isinstance(change, six.string_types):
- if change == "name":
- self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
- elif change == "name_desc":
- self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
- elif change == "folder":
- self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
-
- if self.module.params['folder']:
- self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
- elif change == "home_server":
- if self.module.params['home_server']:
- host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
- else:
- host_ref = "OpaqueRef:NULL"
-
- self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
- elif isinstance(change, dict):
- if change.get('hardware'):
- for hardware_change in change['hardware']:
- if hardware_change == "num_cpus":
- num_cpus = int(self.module.params['hardware']['num_cpus'])
-
- if num_cpus < int(self.vm_params['VCPUs_at_startup']):
- self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
- self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
- else:
- self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
- self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
- elif hardware_change == "num_cpu_cores_per_socket":
- self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
- num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
-
- if num_cpu_cores_per_socket > 1:
- self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
- elif hardware_change == "memory_mb":
- memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
- vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
-
- self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
- elif change.get('disks_changed'):
- vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
- position = 0
-
- for disk_change_list in change['disks_changed']:
- for disk_change in disk_change_list:
- vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
-
- if disk_change == "name":
- self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
- elif disk_change == "name_desc":
- self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
- elif disk_change == "size":
- self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
- "VM reconfigure disks[%s]: " % position)))
-
- position += 1
- elif change.get('disks_new'):
- for position, disk_userdevice in change['disks_new']:
- disk_params = self.module.params['disks'][position]
-
- disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
- disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
-
- if disk_params.get('sr_uuid'):
- sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
- elif disk_params.get('sr'):
- sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
- else:
- sr_ref = self.default_sr_ref
-
- disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
-
- new_disk_vdi = {
- "name_label": disk_name,
- "name_description": disk_name_desc,
- "SR": sr_ref,
- "virtual_size": disk_size,
- "type": "user",
- "sharable": False,
- "read_only": False,
- "other_config": {},
- }
-
- new_disk_vbd = {
- "VM": self.vm_ref,
- "VDI": None,
- "userdevice": disk_userdevice,
- "bootable": False,
- "mode": "RW",
- "type": "Disk",
- "empty": False,
- "other_config": {},
- "qos_algorithm_type": "",
- "qos_algorithm_params": {},
- }
-
- new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
- vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
-
- if self.vm_params['power_state'].lower() == "running":
- self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
-
- elif change.get('cdrom'):
- vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
-
- # If there is no CD present, we have to create one.
- if not vm_cdrom_params_list:
-                        # We will try to place the cdrom at userdevice position
-                        # 3 (the default) if it is not already occupied, else we
-                        # will place it at the first allowed position.
- cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
-
- if "3" in cdrom_userdevices_allowed:
- cdrom_userdevice = "3"
- else:
- cdrom_userdevice = cdrom_userdevices_allowed[0]
-
- cdrom_vbd = {
- "VM": self.vm_ref,
- "VDI": "OpaqueRef:NULL",
- "userdevice": cdrom_userdevice,
- "bootable": False,
- "mode": "RO",
- "type": "CD",
- "empty": True,
- "other_config": {},
- "qos_algorithm_type": "",
- "qos_algorithm_params": {},
- }
-
- cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
- else:
- cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
-
- cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
-
- for cdrom_change in change['cdrom']:
- if cdrom_change == "type":
- cdrom_type = self.module.params['cdrom']['type']
-
- if cdrom_type == "none" and not cdrom_is_empty:
- self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
- elif cdrom_type == "host":
- # Unimplemented!
- pass
-
- elif cdrom_change == "iso_name":
- if not cdrom_is_empty:
- self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
-
- cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
- self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
- elif change.get('networks_changed'):
- position = 0
-
- for network_change_list in change['networks_changed']:
- if network_change_list:
- vm_vif_params = self.vm_params['VIFs'][position]
- network_params = self.module.params['networks'][position]
-
- vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
- network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
-
- vif_recreated = False
-
- if "name" in network_change_list or "mac" in network_change_list:
- # To change network or MAC, we destroy old
- # VIF and then create a new one with changed
- # parameters. That's how XenCenter does it.
-
- # Copy all old parameters to new VIF record.
- vif = {
- "device": vm_vif_params['device'],
- "network": network_ref,
- "VM": vm_vif_params['VM'],
- "MAC": vm_vif_params['MAC'],
- "MTU": vm_vif_params['MTU'],
- "other_config": vm_vif_params['other_config'],
- "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
- "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
- "locking_mode": vm_vif_params['locking_mode'],
- "ipv4_allowed": vm_vif_params['ipv4_allowed'],
- "ipv6_allowed": vm_vif_params['ipv6_allowed'],
- }
-
- if "name" in network_change_list:
- network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
- vif['network'] = network_ref_new
- vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
-
- if "mac" in network_change_list:
- vif['MAC'] = network_params['mac'].lower()
-
- if self.vm_params['power_state'].lower() == "running":
- self.xapi_session.xenapi.VIF.unplug(vif_ref)
-
- self.xapi_session.xenapi.VIF.destroy(vif_ref)
- vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
-
- if self.vm_params['power_state'].lower() == "running":
- self.xapi_session.xenapi.VIF.plug(vif_ref_new)
-
- vif_ref = vif_ref_new
- vif_recreated = True
-
- if self.vm_params['customization_agent'] == "native":
- vif_reconfigure_needed = False
-
- if "type" in network_change_list:
- network_type = network_params['type'].capitalize()
- vif_reconfigure_needed = True
- else:
- network_type = vm_vif_params['ipv4_configuration_mode']
-
- if "ip" in network_change_list:
- network_ip = network_params['ip']
- vif_reconfigure_needed = True
- elif vm_vif_params['ipv4_addresses']:
- network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
- else:
- network_ip = ""
-
- if "prefix" in network_change_list:
- network_prefix = "/%s" % network_params['prefix']
- vif_reconfigure_needed = True
- elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
- network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
- else:
- network_prefix = ""
-
- if "gateway" in network_change_list:
- network_gateway = network_params['gateway']
- vif_reconfigure_needed = True
- else:
- network_gateway = vm_vif_params['ipv4_gateway']
-
- if vif_recreated or vif_reconfigure_needed:
- self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
- "%s%s" % (network_ip, network_prefix), network_gateway)
-
- vif_reconfigure_needed = False
-
- if "type6" in network_change_list:
- network_type6 = network_params['type6'].capitalize()
- vif_reconfigure_needed = True
- else:
- network_type6 = vm_vif_params['ipv6_configuration_mode']
-
- if "ip6" in network_change_list:
- network_ip6 = network_params['ip6']
- vif_reconfigure_needed = True
- elif vm_vif_params['ipv6_addresses']:
- network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
- else:
- network_ip6 = ""
-
- if "prefix6" in network_change_list:
- network_prefix6 = "/%s" % network_params['prefix6']
- vif_reconfigure_needed = True
- elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
- network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
- else:
- network_prefix6 = ""
-
- if "gateway6" in network_change_list:
- network_gateway6 = network_params['gateway6']
- vif_reconfigure_needed = True
- else:
- network_gateway6 = vm_vif_params['ipv6_gateway']
-
- if vif_recreated or vif_reconfigure_needed:
- self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
- "%s%s" % (network_ip6, network_prefix6), network_gateway6)
-
- elif self.vm_params['customization_agent'] == "custom":
- vif_device = vm_vif_params['device']
-
-                            # A user could have manually changed the network
-                            # or mac, e.g. through XenCenter, and then also
-                            # made those changes in the playbook manually.
-                            # In that case, the module will not detect any
- # changes and info in xenstore_data will
- # become stale. For that reason we always
- # update name and mac in xenstore_data.
-
- # Since we handle name and mac differently,
- # we have to remove them from
- # network_change_list.
- network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
-
- for network_change in network_change_list_tmp + ['name', 'mac']:
- self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/%s" % (vif_device, network_change))
-
- if network_params.get('name'):
- network_name = network_params['name']
- else:
- network_name = vm_vif_params['network']['name_label']
-
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
-
- if network_params.get('mac'):
- network_mac = network_params['mac'].lower()
- else:
- network_mac = vm_vif_params['MAC'].lower()
-
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
-
- for network_change in network_change_list_tmp:
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/%s" % (vif_device, network_change),
- network_params[network_change])
-
- position += 1
- elif change.get('networks_new'):
- for position, vif_device in change['networks_new']:
- network_params = self.module.params['networks'][position]
-
- network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
-
- network_name = network_params['name']
- network_mac = network_params['mac'] if network_params.get('mac') else ""
- network_type = network_params.get('type')
- network_ip = network_params['ip'] if network_params.get('ip') else ""
- network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
- network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
- network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
- network_type6 = network_params.get('type6')
- network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
- network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
- network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
-
- vif = {
- "device": vif_device,
- "network": network_ref,
- "VM": self.vm_ref,
- "MAC": network_mac,
- "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
- "other_config": {},
- "qos_algorithm_type": "",
- "qos_algorithm_params": {},
- }
-
- vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
-
- if self.vm_params['power_state'].lower() == "running":
- self.xapi_session.xenapi.VIF.plug(vif_ref_new)
-
- if self.vm_params['customization_agent'] == "native":
- if network_type and network_type == "static":
- self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
- "%s/%s" % (network_ip, network_prefix), network_gateway)
-
- if network_type6 and network_type6 == "static":
- self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
- "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
- elif self.vm_params['customization_agent'] == "custom":
- # We first have to remove any existing data
- # from xenstore_data because there could be
- # some old leftover data from some interface
-                            # that once occupied the same device location as
- # our new interface.
- for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
- self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
-
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
-
- # We get MAC from VIF itself instead of
- # networks.mac because it could be
- # autogenerated.
- vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
-
- if network_type:
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
-
- if network_type == "static":
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/ip" % vif_device, network_ip)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/prefix" % vif_device, network_prefix)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/netmask" % vif_device, network_netmask)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/gateway" % vif_device, network_gateway)
-
- if network_type6:
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
-
- if network_type6 == "static":
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/ip6" % vif_device, network_ip6)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
- self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
- "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
-
- elif change.get('custom_params'):
- for position in change['custom_params']:
- custom_param_key = self.module.params['custom_params'][position]['key']
- custom_param_value = self.module.params['custom_params'][position]['value']
- self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
-
- if self.module.params['is_template']:
- self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
- elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
- self.set_power_state("poweredon")
-
- # Gather new params after reconfiguration.
- self.gather_params()
-
- except XenAPI.Failure as f:
- self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
- return config_changes
-
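For reference, the custom customization agent path above communicates entirely through per-VIF xenstore keys. A minimal sketch of the resulting layout, with a hypothetical device number and made-up address values; the keys follow the same "vm-data/networks/%s/%s" pattern used in the code:

    # Sketch of the per-interface xenstore keys written above.
    # Device "0" and all values are illustrative examples.
    vif_device = "0"
    xenstore_data = {
        "vm-data/networks/%s/name" % vif_device: "Pool-wide network associated with eth0",
        "vm-data/networks/%s/mac" % vif_device: "ba:91:3a:48:20:76",
        "vm-data/networks/%s/type" % vif_device: "static",
        "vm-data/networks/%s/ip" % vif_device: "192.168.0.200",
        "vm-data/networks/%s/prefix" % vif_device: "24",
        "vm-data/networks/%s/netmask" % vif_device: "255.255.255.0",
        "vm-data/networks/%s/gateway" % vif_device: "192.168.0.254",
    }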
- def destroy(self):
- """Removes an existing VM with associated disks"""
- # Safety check.
- if not self.exists():
- self.module.fail_json(msg="Called destroy on non existing VM!")
-
- if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
- self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
-
- # Support for Ansible check mode.
- if self.module.check_mode:
- return
-
- # Make sure that the VM is powered off before we destroy it.
- self.set_power_state("poweredoff")
-
- try:
- # Destroy VM!
- self.xapi_session.xenapi.VM.destroy(self.vm_ref)
-
- vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
-
- # Destroy all VDIs associated with VM!
- for vm_disk_params in vm_disk_params_list:
- vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
-
- self.xapi_session.xenapi.VDI.destroy(vdi_ref)
-
- except XenAPI.Failure as f:
- self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
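The deletion order in destroy() matters: the VM record goes first, then every VDI that backed a VBD of type "Disk"; CD VBDs are filtered out, so inserted ISO images survive. A condensed sketch of that sequence, assuming an authenticated XenAPI session named session and the VBD data gathered above (names are hypothetical):

    # Hypothetical names; the calls mirror destroy() above.
    session.xenapi.VM.destroy(vm_ref)
    for vbd in disk_vbds:  # only VBDs with type == "Disk"
        vdi_ref = session.xenapi.VDI.get_by_uuid(vbd['VDI']['uuid'])
        session.xenapi.VDI.destroy(vdi_ref)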
- def get_changes(self):
- """Finds VM parameters that differ from specified ones.
-
- This method builds a list of VM parameters that differ
- from those specified in module parameters.
-
- Returns:
- list: VM parameters that differ from those specified in
- module parameters.
- """
- # Safety check.
- if not self.exists():
- self.module.fail_json(msg="Called get_changes on non existing VM!")
-
- need_poweredoff = False
-
- if self.module.params['is_template']:
- need_poweredoff = True
-
- try:
- # This VM could be a template or a snapshot. In that case we fail
- # because we can't reconfigure them or it would just be too
- # dangerous.
- if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
- self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
-
- if self.vm_params['is_a_snapshot']:
- self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
-
- # Let's build a list of parameters that changed.
- config_changes = []
-
- # Name could only differ if we found an existing VM by uuid.
- if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
- if self.module.params['name']:
- config_changes.append('name')
- else:
- self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
-
- if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
- config_changes.append('name_desc')
-
- # Folder parameter is found in other_config.
- vm_other_config = self.vm_params['other_config']
- vm_folder = vm_other_config.get('folder', '')
-
- if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
- config_changes.append('folder')
-
- if self.module.params['home_server'] is not None:
- if (self.module.params['home_server'] and
- (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
-
- # Check existence only. Ignore return value.
- get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
- msg_prefix="VM check home_server: ")
-
- config_changes.append('home_server')
- elif not self.module.params['home_server'] and self.vm_params['affinity']:
- config_changes.append('home_server')
-
- config_changes_hardware = []
-
- if self.module.params['hardware']:
- num_cpus = self.module.params['hardware'].get('num_cpus')
-
- if num_cpus is not None:
- # Kept for compatibility with older Ansible versions that
- # do not support subargument specs.
- try:
- num_cpus = int(num_cpus)
- except ValueError:
- self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
-
- if num_cpus < 1:
- self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
-
- # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
- # say the former is the way to go but this needs
- # confirmation and testing.
- if num_cpus != int(self.vm_params['VCPUs_at_startup']):
- config_changes_hardware.append('num_cpus')
- # For now, we don't support hotplugging so the VM has to be in
- # poweredoff state to reconfigure.
- need_poweredoff = True
-
- num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
-
- if num_cpu_cores_per_socket is not None:
- # Kept for compatibility with older Ansible versions that
- # do not support subargument specs.
- try:
- num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
- except ValueError:
- self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
-
- if num_cpu_cores_per_socket < 1:
- self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
-
- if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
- self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
-
- vm_platform = self.vm_params['platform']
- vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
-
- if num_cpu_cores_per_socket != vm_cores_per_socket:
- config_changes_hardware.append('num_cpu_cores_per_socket')
- # For now, we don't support hotplugging so the VM has to be
- # in poweredoff state to reconfigure.
- need_poweredoff = True
-
- memory_mb = self.module.params['hardware'].get('memory_mb')
-
- if memory_mb is not None:
- # Kept for compatibility with older Ansible versions that
- # do not support subargument specs.
- try:
- memory_mb = int(memory_mb)
- except ValueError:
- self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
-
- if memory_mb < 1:
- self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
-
- # There are multiple memory parameters:
- # - memory_dynamic_max
- # - memory_dynamic_min
- # - memory_static_max
- # - memory_static_min
- # - memory_target
- #
- # memory_target seems like a good candidate but it returns 0 for
- # halted VMs so we can't use it.
- #
- # I decided to use memory_dynamic_max and memory_static_max
- # and use whichever is larger. This strategy needs validation
- # and testing.
- #
- # XenServer stores memory size in bytes so we need to divide
- # it by 1024*1024 = 1048576.
- if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
- config_changes_hardware.append('memory_mb')
- # For now, we don't support hotplugging so the VM has to be in
- # poweredoff state to reconfigure.
- need_poweredoff = True
-
- if config_changes_hardware:
- config_changes.append({"hardware": config_changes_hardware})
-
- config_changes_disks = []
- config_new_disks = []
-
- # Find allowed userdevices.
- vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
-
- if self.module.params['disks']:
- # Get the list of all disks. Filter out any CDs found.
- vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
-
- # The number of disks defined in module params has to be the
- # same as or higher than the number of existing disks attached
- # to the VM. We don't support removal or detachment of disks.
- if len(self.module.params['disks']) < len(vm_disk_params_list):
- self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
- (len(self.module.params['disks']), len(vm_disk_params_list)))
-
- # Find the highest occupied disk userdevice.
- if not vm_disk_params_list:
- vm_disk_userdevice_highest = "-1"
- else:
- vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
-
- for position in range(len(self.module.params['disks'])):
- if position < len(vm_disk_params_list):
- vm_disk_params = vm_disk_params_list[position]
- else:
- vm_disk_params = None
-
- disk_params = self.module.params['disks'][position]
-
- disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
-
- disk_name = disk_params.get('name')
-
- if disk_name is not None and not disk_name:
- self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
-
- # If this is an existing disk.
- if vm_disk_params and vm_disk_params['VDI']:
- disk_changes = []
-
- if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
- disk_changes.append('name')
-
- disk_name_desc = disk_params.get('name_desc')
-
- if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
- disk_changes.append('name_desc')
-
- if disk_size:
- if disk_size > int(vm_disk_params['VDI']['virtual_size']):
- disk_changes.append('size')
- need_poweredoff = True
- elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
- self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
- "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
-
- config_changes_disks.append(disk_changes)
- # If this is a new disk.
- else:
- if not disk_size:
- self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
-
- disk_sr_uuid = disk_params.get('sr_uuid')
- disk_sr = disk_params.get('sr')
-
- if disk_sr_uuid is not None or disk_sr is not None:
- # Check existence only. Ignore return value.
- get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
- msg_prefix="VM check disks[%s]: " % position)
- elif self.default_sr_ref == 'OpaqueRef:NULL':
- self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
-
- if not vbd_userdevices_allowed:
- self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
-
- disk_userdevice = None
-
- # We need to place a new disk right above the highest
- # placed existing disk to keep relative disk positions
- # paired with the disk specifications in module params.
- # That place must not be occupied by some other device
- # like a CD-ROM.
- for userdevice in vbd_userdevices_allowed:
- if int(userdevice) > int(vm_disk_userdevice_highest):
- disk_userdevice = userdevice
- vbd_userdevices_allowed.remove(userdevice)
- vm_disk_userdevice_highest = userdevice
- break
-
- # If no place was found.
- if disk_userdevice is None:
- # The highest occupied place could be a CD-ROM device,
- # so we have to include all devices regardless of type
- # when calculating the out-of-bounds position.
- disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
- self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
-
- # For new disks we only track their position.
- config_new_disks.append((position, disk_userdevice))
-
- # We should append config_changes_disks to config_changes only
- # if there is at least one changed disk, else skip.
- for disk_change in config_changes_disks:
- if disk_change:
- config_changes.append({"disks_changed": config_changes_disks})
- break
-
- if config_new_disks:
- config_changes.append({"disks_new": config_new_disks})
-
- config_changes_cdrom = []
-
- if self.module.params['cdrom']:
- # Get the list of all CD-ROMs. Filter out any regular disks
- # found. If no existing CD-ROM is found, we will create one
- # later; else we take the first one found.
- vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
-
- # If no existing CD-ROM is found, we will need to add one.
- # We need to check if there is any userdevice allowed.
- if not vm_cdrom_params_list and not vbd_userdevices_allowed:
- self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
-
- cdrom_type = self.module.params['cdrom'].get('type')
- cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
-
- # If cdrom.iso_name is specified but cdrom.type is not,
- # then set cdrom.type to 'iso', unless cdrom.iso_name is
- # an empty string, in which case set cdrom.type to 'none'.
- if not cdrom_type:
- if cdrom_iso_name:
- cdrom_type = "iso"
- elif cdrom_iso_name is not None:
- cdrom_type = "none"
-
- self.module.params['cdrom']['type'] = cdrom_type
-
- # If type changed.
- if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
- config_changes_cdrom.append('type')
-
- if cdrom_type == "iso":
- # Check if ISO exists.
- # Check existence only. Ignore return value.
- get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
- msg_prefix="VM check cdrom.iso_name: ")
-
- # Has the ISO image changed?
- if (cdrom_iso_name and
- (not vm_cdrom_params_list or
- not vm_cdrom_params_list[0]['VDI'] or
- cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
- config_changes_cdrom.append('iso_name')
-
- if config_changes_cdrom:
- config_changes.append({"cdrom": config_changes_cdrom})
-
- config_changes_networks = []
- config_new_networks = []
-
- # Find allowed devices.
- vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
-
- if self.module.params['networks']:
- # The number of VIFs defined in module params has to be the
- # same as or higher than the number of existing VIFs attached
- # to the VM. We don't support removal of VIFs.
- if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
- self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
- (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
-
- # Find the highest occupied device.
- if not self.vm_params['VIFs']:
- vif_device_highest = "-1"
- else:
- vif_device_highest = self.vm_params['VIFs'][-1]['device']
-
- for position in range(len(self.module.params['networks'])):
- if position < len(self.vm_params['VIFs']):
- vm_vif_params = self.vm_params['VIFs'][position]
- else:
- vm_vif_params = None
-
- network_params = self.module.params['networks'][position]
-
- network_name = network_params.get('name')
-
- if network_name is not None and not network_name:
- self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
-
- if network_name:
- # Check existence only. Ignore return value.
- get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
- msg_prefix="VM check networks[%s]: " % position)
-
- network_mac = network_params.get('mac')
-
- if network_mac is not None:
- network_mac = network_mac.lower()
-
- if not is_mac(network_mac):
- self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
-
- # IPv4 reconfiguration.
- network_type = network_params.get('type')
- network_ip = network_params.get('ip')
- network_netmask = network_params.get('netmask')
- network_prefix = None
-
- # If networks.ip is specified and networks.type is not,
- # then set networks.type to 'static'.
- if not network_type and network_ip:
- network_type = "static"
-
- # XenServer natively supports only 'none' and 'static'
- # types, with 'none' being the same as 'dhcp'.
- if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
- network_type = "none"
-
- if network_type and network_type == "static":
- if network_ip is not None:
- network_ip_split = network_ip.split('/')
- network_ip = network_ip_split[0]
-
- if network_ip and not is_valid_ip_addr(network_ip):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
-
- if len(network_ip_split) > 1:
- network_prefix = network_ip_split[1]
-
- if not is_valid_ip_prefix(network_prefix):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
-
- if network_netmask is not None:
- if not is_valid_ip_netmask(network_netmask):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
-
- network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
- elif network_prefix is not None:
- network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
-
- # If any parameter is overridden at this point, update it.
- if network_type:
- network_params['type'] = network_type
-
- if network_ip:
- network_params['ip'] = network_ip
-
- if network_netmask:
- network_params['netmask'] = network_netmask
-
- if network_prefix:
- network_params['prefix'] = network_prefix
-
- network_gateway = network_params.get('gateway')
-
- # Gateway can be an empty string (when removing gateway
- # configuration) but if it is not, it should be validated.
- if network_gateway and not is_valid_ip_addr(network_gateway):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
-
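The ip_netmask_to_prefix() and ip_prefix_to_netmask() helpers used above come from module_utils.xenserver and are not shown in this diff. Their effect can be sketched with the standard library; this is an equivalent conversion, not the actual implementation:

    import ipaddress

    def netmask_to_prefix(netmask):
        # '255.255.255.128' -> '25'
        return str(ipaddress.ip_network('0.0.0.0/%s' % netmask).prefixlen)

    def prefix_to_netmask(prefix):
        # '25' -> '255.255.255.128'
        return str(ipaddress.ip_network('0.0.0.0/%s' % prefix).netmask)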
- # IPv6 reconfiguration.
- network_type6 = network_params.get('type6')
- network_ip6 = network_params.get('ip6')
- network_prefix6 = None
-
- # If networks.ip6 is specified and networks.type6 is not,
- # then set networks.type6 to 'static'.
- if not network_type6 and network_ip6:
- network_type6 = "static"
-
- # XenServer natively supports only 'none' and 'static'
- # types, with 'none' being the same as 'dhcp'.
- if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
- network_type6 = "none"
-
- if network_type6 and network_type6 == "static":
- if network_ip6 is not None:
- network_ip6_split = network_ip6.split('/')
- network_ip6 = network_ip6_split[0]
-
- if network_ip6 and not is_valid_ip6_addr(network_ip6):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
-
- if len(network_ip6_split) > 1:
- network_prefix6 = network_ip6_split[1]
-
- if not is_valid_ip6_prefix(network_prefix6):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
-
- # If any parameter is overridden at this point, update it.
- if network_type6:
- network_params['type6'] = network_type6
-
- if network_ip6:
- network_params['ip6'] = network_ip6
-
- if network_prefix6:
- network_params['prefix6'] = network_prefix6
-
- network_gateway6 = network_params.get('gateway6')
-
- # Gateway can be an empty string (when removing gateway
- # configuration) but if it is not, it should be validated.
- if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
- self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
-
- # If this is an existing VIF.
- if vm_vif_params and vm_vif_params['network']:
- network_changes = []
-
- if network_name and network_name != vm_vif_params['network']['name_label']:
- network_changes.append('name')
-
- if network_mac and network_mac != vm_vif_params['MAC'].lower():
- network_changes.append('mac')
-
- if self.vm_params['customization_agent'] == "native":
- if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
- network_changes.append('type')
-
- if network_type and network_type == "static":
- if network_ip and (not vm_vif_params['ipv4_addresses'] or
- not vm_vif_params['ipv4_addresses'][0] or
- network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
- network_changes.append('ip')
-
- if network_prefix and (not vm_vif_params['ipv4_addresses'] or
- not vm_vif_params['ipv4_addresses'][0] or
- network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
- network_changes.append('prefix')
- network_changes.append('netmask')
-
- if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
- network_changes.append('gateway')
-
- if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
- network_changes.append('type6')
-
- if network_type6 and network_type6 == "static":
- if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
- not vm_vif_params['ipv6_addresses'][0] or
- network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
- network_changes.append('ip6')
-
- if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
- not vm_vif_params['ipv6_addresses'][0] or
- network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
- network_changes.append('prefix6')
-
- if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
- network_changes.append('gateway6')
-
- elif self.vm_params['customization_agent'] == "custom":
- vm_xenstore_data = self.vm_params['xenstore_data']
-
- if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
- network_changes.append('type')
- need_poweredoff = True
-
- if network_type and network_type == "static":
- if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
- network_changes.append('ip')
- need_poweredoff = True
-
- if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
- network_changes.append('prefix')
- network_changes.append('netmask')
- need_poweredoff = True
-
- if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
- vm_vif_params['device'], ""):
- network_changes.append('gateway')
- need_poweredoff = True
-
- if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
- network_changes.append('type6')
- need_poweredoff = True
-
- if network_type6 and network_type6 == "static":
- if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
- network_changes.append('ip6')
- need_poweredoff = True
-
- if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
- network_changes.append('prefix6')
- need_poweredoff = True
-
- if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
- vm_vif_params['device'], ""):
- network_changes.append('gateway6')
- need_poweredoff = True
-
- config_changes_networks.append(network_changes)
- # If this is a new VIF.
- else:
- if not network_name:
- self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
-
- if network_type and network_type == "static" and network_ip and not network_netmask:
- self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
-
- if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
- self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
-
- # Restart is needed if we are adding a new network
- # interface with IP/gateway parameters specified
- # and the custom agent is used.
- if self.vm_params['customization_agent'] == "custom":
- for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
- if network_params.get(parameter):
- need_poweredoff = True
- break
-
- if not vif_devices_allowed:
- self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
-
- # We need to place a new network interface right above the
- # highest placed existing interface to keep relative positions
- # paired with the network interface specifications in module
- # params.
- vif_device = str(int(vif_device_highest) + 1)
-
- if vif_device not in vif_devices_allowed:
- self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
-
- vif_devices_allowed.remove(vif_device)
- vif_device_highest = vif_device
-
- # For new VIFs we only track their position.
- config_new_networks.append((position, vif_device))
-
- # We should append config_changes_networks to config_changes only
- # if there is at least one changed network, else skip.
- for network_change in config_changes_networks:
- if network_change:
- config_changes.append({"networks_changed": config_changes_networks})
- break
-
- if config_new_networks:
- config_changes.append({"networks_new": config_new_networks})
-
- config_changes_custom_params = []
-
- if self.module.params['custom_params']:
- for position in range(len(self.module.params['custom_params'])):
- custom_param = self.module.params['custom_params'][position]
-
- custom_param_key = custom_param['key']
- custom_param_value = custom_param['value']
-
- if custom_param_key not in self.vm_params:
- self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
-
- if custom_param_value != self.vm_params[custom_param_key]:
- # We only need to track custom param position.
- config_changes_custom_params.append(position)
-
- if config_changes_custom_params:
- config_changes.append({"custom_params": config_changes_custom_params})
-
- if need_poweredoff:
- config_changes.append('need_poweredoff')
-
- return config_changes
-
- except XenAPI.Failure as f:
- self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
-
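Taken together, get_changes() returns a flat list that mixes plain strings with single-key dicts. A representative, made-up return value, using only keys the method can actually emit:

    config_changes = [
        'name',                                   # simple VM-level changes are strings
        {'hardware': ['num_cpus', 'memory_mb']},
        {'disks_changed': [['name']]},            # one change list per existing disk position
        {'disks_new': [(1, '2')]},                # (module param position, VBD userdevice)
        {'networks_new': [(1, '1')]},             # (module param position, VIF device)
        'need_poweredoff',                        # appended last when a change requires power off
    ]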
- def get_normalized_disk_size(self, disk_params, msg_prefix=""):
- """Parses disk size parameters and returns disk size in bytes.
-
- This method tries to parse disk size module parameters. It fails
- with an error message if size cannot be parsed.
-
- Args:
- disk_params (dict): A dictionary with disk parameters.
- msg_prefix (str): A string error messages should be prefixed
- with (default: "").
-
- Returns:
- int: disk size in bytes if disk size is successfully parsed or
- None if no disk size parameters were found.
- """
- # There should be only a single size spec but we make a list of all
- # size specs just in case. Priority is given to 'size' but if not
- # found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and use
- # the first one found.
- disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
-
- if disk_size_spec:
- try:
- # size
- if "size" in disk_size_spec:
- size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
- disk_size_m = size_regex.match(disk_params['size'])
-
- if disk_size_m:
- size = disk_size_m.group(1)
- unit = disk_size_m.group(2)
- else:
- raise ValueError
- # size_tb, size_gb, size_mb, size_kb, size_b
- else:
- size = disk_params[disk_size_spec[0]]
- unit = disk_size_spec[0].split('_')[-1]
-
- if not unit:
- unit = "b"
- else:
- unit = unit.lower()
-
- if re.match(r'\d+\.\d+', size):
- # We found a float value in the string, let's typecast it.
- if unit == "b":
- # If we found a float but the unit is bytes, we keep the integer part only.
- size = int(float(size))
- else:
- size = float(size)
- else:
- # We found an int value in the string, let's typecast it.
- size = int(size)
-
- if not size or size < 0:
- raise ValueError
-
- except (TypeError, ValueError, NameError):
- # Common failure
- self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
-
- disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
-
- if unit in disk_units:
- return int(size * (1024 ** disk_units[unit]))
- else:
- self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
- (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
- else:
- return None
-
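get_normalized_disk_size() accepts either a combined 'size' string such as '10.5 GB' or a unit-specific key such as 'size_gb': '10'. A self-contained sketch of the same normalization, stripped of the module's failure reporting:

    import re

    DISK_UNITS = {'tb': 4, 'gb': 3, 'mb': 2, 'kb': 1, 'b': 0}

    def normalize_size(spec):
        """Parse a '10.5 GB' style string into bytes."""
        m = re.match(r'(\d+(?:\.\d+)?)\s*(.*)', spec)
        if not m:
            raise ValueError('unparsable size: %r' % spec)
        size = float(m.group(1))
        unit = (m.group(2) or 'b').lower()
        return int(size * 1024 ** DISK_UNITS[unit])

    assert normalize_size('10 GB') == 10737418240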
- @staticmethod
- def get_cdrom_type(vm_cdrom_params):
- """Returns VM CD-ROM type."""
- # TODO: implement support for detecting type 'host'. No server to test
- # this on at the moment.
- if vm_cdrom_params['empty']:
- return "none"
- else:
- return "iso"
-
-
-def main():
- argument_spec = xenserver_common_argument_spec()
- argument_spec.update(
- state=dict(type='str', default='present',
- choices=['present', 'absent', 'poweredon']),
- name=dict(type='str', aliases=['name_label']),
- name_desc=dict(type='str'),
- uuid=dict(type='str'),
- template=dict(type='str', aliases=['template_src']),
- template_uuid=dict(type='str'),
- is_template=dict(type='bool', default=False),
- folder=dict(type='str'),
- hardware=dict(
- type='dict',
- options=dict(
- num_cpus=dict(type='int'),
- num_cpu_cores_per_socket=dict(type='int'),
- memory_mb=dict(type='int'),
- ),
- ),
- disks=dict(
- type='list',
- elements='dict',
- options=dict(
- size=dict(type='str'),
- size_tb=dict(type='str'),
- size_gb=dict(type='str'),
- size_mb=dict(type='str'),
- size_kb=dict(type='str'),
- size_b=dict(type='str'),
- name=dict(type='str', aliases=['name_label']),
- name_desc=dict(type='str'),
- sr=dict(type='str'),
- sr_uuid=dict(type='str'),
- ),
- aliases=['disk'],
- mutually_exclusive=[
- ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
- ['sr', 'sr_uuid'],
- ],
- ),
- cdrom=dict(
- type='dict',
- options=dict(
- type=dict(type='str', choices=['none', 'iso']),
- iso_name=dict(type='str'),
- ),
- required_if=[
- ['type', 'iso', ['iso_name']],
- ],
- ),
- networks=dict(
- type='list',
- elements='dict',
- options=dict(
- name=dict(type='str', aliases=['name_label']),
- mac=dict(type='str'),
- type=dict(type='str', choices=['none', 'dhcp', 'static']),
- ip=dict(type='str'),
- netmask=dict(type='str'),
- gateway=dict(type='str'),
- type6=dict(type='str', choices=['none', 'dhcp', 'static']),
- ip6=dict(type='str'),
- gateway6=dict(type='str'),
- ),
- aliases=['network'],
- required_if=[
- ['type', 'static', ['ip']],
- ['type6', 'static', ['ip6']],
- ],
- ),
- home_server=dict(type='str'),
- custom_params=dict(
- type='list',
- elements='dict',
- options=dict(
- key=dict(type='str', required=True, no_log=False),
- value=dict(type='raw', required=True),
- ),
- ),
- wait_for_ip_address=dict(type='bool', default=False),
- state_change_timeout=dict(type='int', default=0),
- linked_clone=dict(type='bool', default=False),
- force=dict(type='bool', default=False),
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[
- ['name', 'uuid'],
- ],
- mutually_exclusive=[
- ['template', 'template_uuid'],
- ],
- )
-
- result = {'failed': False, 'changed': False}
-
- vm = XenServerVM(module)
-
- # Find existing VM
- if vm.exists():
- if module.params['state'] == "absent":
- vm.destroy()
- result['changed'] = True
- elif module.params['state'] == "present":
- config_changes = vm.reconfigure()
-
- if config_changes:
- result['changed'] = True
-
- # Make new disk and network changes more user friendly
- # and informative.
- for change in config_changes:
- if isinstance(change, dict):
- if change.get('disks_new'):
- disks_new = []
-
- for position, userdevice in change['disks_new']:
- disk_new_params = {"position": position, "vbd_userdevice": userdevice}
- disk_params = module.params['disks'][position]
-
- for k in disk_params.keys():
- if disk_params[k] is not None:
- disk_new_params[k] = disk_params[k]
-
- disks_new.append(disk_new_params)
-
- if disks_new:
- change['disks_new'] = disks_new
-
- elif change.get('networks_new'):
- networks_new = []
-
- for position, device in change['networks_new']:
- network_new_params = {"position": position, "vif_device": device}
- network_params = module.params['networks'][position]
-
- for k in network_params.keys():
- if network_params[k] is not None:
- network_new_params[k] = network_params[k]
-
- networks_new.append(network_new_params)
-
- if networks_new:
- change['networks_new'] = networks_new
-
- result['changes'] = config_changes
-
- elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
- result['changed'] = vm.set_power_state(module.params['state'])
- elif module.params['state'] != "absent":
- vm.deploy()
- result['changed'] = True
-
- if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
- vm.wait_for_ip_address()
-
- result['instance'] = vm.gather_facts()
-
- if result['failed']:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
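After the friendliness pass above, each disks_new/networks_new entry in result['changes'] becomes a dict instead of a bare tuple. A made-up example of what a caller would see registered (values are illustrative):

    result = {
        'failed': False,
        'changed': True,
        'changes': [
            {'disks_new': [
                {'position': 1, 'vbd_userdevice': '2', 'size': '10gb', 'sr': 'Local storage'},
            ]},
        ],
        'instance': {},  # full VM facts from vm.gather_facts()
    }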
diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
deleted file mode 100644
index a2e77725..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_info.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2018, Bojan Vitnik
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: xenserver_guest_info
-short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
-description: >
- This module can be used to gather essential VM facts.
-author:
-- Bojan Vitnik (@bvitnik)
-notes:
-- Minimal supported version of XenServer is 5.6.
-- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
- Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
- Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
- U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
- accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate to your OS certificate store or use C(validate_certs: no)
- which requires the XenAPI library from the XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
-- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
-requirements:
-- python >= 2.6
-- XenAPI
-options:
- name:
- description:
- - Name of the VM to gather facts from.
- - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
- - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
- - This parameter is case sensitive.
- type: str
- aliases: [ name_label ]
- uuid:
- description:
- - UUID of the VM to gather facts of. This is XenServer's unique identifier.
- - It is required if name is not unique.
- type: str
-extends_documentation_fragment:
-- community.general.xenserver.documentation
-
-'''
-
-EXAMPLES = r'''
-- name: Gather facts
- community.general.xenserver_guest_info:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- name: testvm_11
- delegate_to: localhost
- register: facts
-'''
-
-RETURN = r'''
-instance:
- description: Metadata about the VM
- returned: always
- type: dict
- sample: {
- "cdrom": {
- "type": "none"
- },
- "customization_agent": "native",
- "disks": [
- {
- "name": "testvm_11-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
- },
- {
- "name": "testvm_11-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
- }
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "testvm_11",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
- }
- }
-'''
-
-HAS_XENAPI = False
-try:
- import XenAPI
- HAS_XENAPI = True
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
- gather_vm_params, gather_vm_facts)
-
-
-class XenServerVM(XenServerObject):
- """Class for managing XenServer VM.
-
- Attributes:
- vm_ref (str): XAPI reference to VM.
- vm_params (dict): A dictionary with VM parameters as returned
- by gather_vm_params() function.
- """
-
- def __init__(self, module):
- """Inits XenServerVM using module parameters.
-
- Args:
- module: Reference to AnsibleModule object.
- """
- super(XenServerVM, self).__init__(module)
-
- self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
- self.gather_params()
-
- def gather_params(self):
- """Gathers all VM parameters available in XAPI database."""
- self.vm_params = gather_vm_params(self.module, self.vm_ref)
-
- def gather_facts(self):
- """Gathers and returns VM facts."""
- return gather_vm_facts(self.module, self.vm_params)
-
-
-def main():
- argument_spec = xenserver_common_argument_spec()
- argument_spec.update(
- name=dict(type='str', aliases=['name_label']),
- uuid=dict(type='str'),
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[
- ['name', 'uuid'],
- ],
- )
-
- result = {'failed': False, 'changed': False}
-
- # Module will exit with an error message if no VM is found.
- vm = XenServerVM(module)
-
- # Gather facts.
- result['instance'] = vm.gather_facts()
-
- if result['failed']:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
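The RETURN sample above is exactly what lands in the registered variable. An illustrative access pattern into result['instance'], using names from the sample:

    facts = result['instance']
    primary_ip = facts['networks'][0]['ip']      # '192.168.0.200' in the sample
    root_disk_bytes = facts['disks'][0]['size']  # 42949672960 in the sample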
diff --git a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py b/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
deleted file mode 100644
index 4a195ff5..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud/xenserver/xenserver_guest_powerstate.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2018, Bojan Vitnik
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: xenserver_guest_powerstate
-short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
-description: >
- This module can be used to power on, power off, restart or suspend a virtual machine and to gracefully reboot or shut down the guest OS of a virtual machine.
-author:
-- Bojan Vitnik (@bvitnik)
-notes:
-- Minimal supported version of XenServer is 5.6.
-- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
- 'To acquire the XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
- Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
- Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
- U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
- 'If no scheme is specified in C(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
- accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
- 'To use the C(https://) scheme for C(hostname) you have to either import the host certificate to your OS certificate store or use C(validate_certs: no)
- which requires the XenAPI library from the XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
-requirements:
-- python >= 2.6
-- XenAPI
-options:
- state:
- description:
- - Specify the state VM should be in.
- - If C(state) is set to a value other than C(present), then the VM is transitioned into the required state and facts are returned.
- - If C(state) is set to C(present), then the VM is just checked for existence and facts are returned.
- type: str
- default: present
- choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
- name:
- description:
- - Name of the VM to manage.
- - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
- - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
- - This parameter is case sensitive.
- type: str
- aliases: [ name_label ]
- uuid:
- description:
- - UUID of the VM to manage if known. This is XenServer's unique identifier.
- - It is required if name is not unique.
- type: str
- wait_for_ip_address:
- description:
- - Wait until XenServer detects an IP address for the VM.
- - This requires XenServer Tools to be preinstalled on the VM to work properly.
- type: bool
- default: no
- state_change_timeout:
- description:
- - 'By default, the module will wait indefinitely for the VM to change state or acquire an IP address if C(wait_for_ip_address: yes).'
- - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
- - In case of timeout, the module will generate an error message.
- type: int
- default: 0
-extends_documentation_fragment:
-- community.general.xenserver.documentation
-
-'''
-
-EXAMPLES = r'''
-- name: Power on VM
- community.general.xenserver_guest_powerstate:
- hostname: "{{ xenserver_hostname }}"
- username: "{{ xenserver_username }}"
- password: "{{ xenserver_password }}"
- name: testvm_11
- state: powered-on
- delegate_to: localhost
- register: facts
-'''
-
-RETURN = r'''
-instance:
- description: Metadata about the VM
- returned: always
- type: dict
- sample: {
- "cdrom": {
- "type": "none"
- },
- "customization_agent": "native",
- "disks": [
- {
- "name": "windows-template-testing-0",
- "name_desc": "",
- "os_device": "xvda",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "0"
- },
- {
- "name": "windows-template-testing-1",
- "name_desc": "",
- "os_device": "xvdb",
- "size": 42949672960,
- "sr": "Local storage",
- "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
- "vbd_userdevice": "1"
- }
- ],
- "domid": "56",
- "folder": "",
- "hardware": {
- "memory_mb": 8192,
- "num_cpu_cores_per_socket": 2,
- "num_cpus": 4
- },
- "home_server": "",
- "is_template": false,
- "name": "windows-template-testing",
- "name_desc": "",
- "networks": [
- {
- "gateway": "192.168.0.254",
- "gateway6": "fc00::fffe",
- "ip": "192.168.0.200",
- "ip6": [
- "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
- "fc00:0000:0000:0000:0000:0000:0000:0001"
- ],
- "mac": "ba:91:3a:48:20:76",
- "mtu": "1500",
- "name": "Pool-wide network associated with eth1",
- "netmask": "255.255.255.128",
- "prefix": "25",
- "prefix6": "64",
- "vif_device": "0"
- }
- ],
- "other_config": {
- "base_template_name": "Windows Server 2016 (64-bit)",
- "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
- "install-methods": "cdrom",
- "instant": "true",
- "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
- },
- "platform": {
- "acpi": "1",
- "apic": "true",
- "cores-per-socket": "2",
- "device_id": "0002",
- "hpet": "true",
- "nx": "true",
- "pae": "true",
- "timeoffset": "-25200",
- "vga": "std",
- "videoram": "8",
- "viridian": "true",
- "viridian_reference_tsc": "true",
- "viridian_time_ref_count": "true"
- },
- "state": "poweredon",
- "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
- "xenstore_data": {
- "vm-data": ""
- }
- }
-'''
-
-import re
-
-HAS_XENAPI = False
-try:
- import XenAPI
- HAS_XENAPI = True
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
- gather_vm_params, gather_vm_facts, set_vm_power_state,
- wait_for_vm_ip_address)
-
-
-class XenServerVM(XenServerObject):
- """Class for managing XenServer VM.
-
- Attributes:
- vm_ref (str): XAPI reference to VM.
- vm_params (dict): A dictionary with VM parameters as returned
- by gather_vm_params() function.
- """
-
- def __init__(self, module):
- """Inits XenServerVM using module parameters.
-
- Args:
- module: Reference to Ansible module object.
- """
- super(XenServerVM, self).__init__(module)
-
- self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
- self.gather_params()
-
- def gather_params(self):
- """Gathers all VM parameters available in XAPI database."""
- self.vm_params = gather_vm_params(self.module, self.vm_ref)
-
- def gather_facts(self):
- """Gathers and returns VM facts."""
- return gather_vm_facts(self.module, self.vm_params)
-
- def set_power_state(self, power_state):
- """Controls VM power state."""
- state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
-
- # If state has changed, update vm_params.
- if state_changed:
- self.vm_params['power_state'] = current_state.capitalize()
-
- return state_changed
-
- def wait_for_ip_address(self):
- """Waits for VM to acquire an IP address."""
- self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
-
-
-def main():
- argument_spec = xenserver_common_argument_spec()
- argument_spec.update(
- state=dict(type='str', default='present',
- choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
- name=dict(type='str', aliases=['name_label']),
- uuid=dict(type='str'),
- wait_for_ip_address=dict(type='bool', default=False),
- state_change_timeout=dict(type='int', default=0),
- )
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True,
- required_one_of=[
- ['name', 'uuid'],
- ],
- )
-
- result = {'failed': False, 'changed': False}
-
- # Module will exit with an error message if no VM is found.
- vm = XenServerVM(module)
-
- # Set VM power state.
- if module.params['state'] != "present":
- result['changed'] = vm.set_power_state(module.params['state'])
-
- if module.params['wait_for_ip_address']:
- vm.wait_for_ip_address()
-
- result['instance'] = vm.gather_facts()
-
- if result['failed']:
- module.fail_json(**result)
- else:
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
deleted file mode 120000
index bbdeea16..00000000
--- a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-cloud/misc/cloud_init_data_facts.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
deleted file mode 120000
index bc779a4e..00000000
--- a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
+++ /dev/null
@@ -1 +0,0 @@
-net_tools/cloudflare_dns.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
deleted file mode 100644
index 9dc1a771..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul.py
+++ /dev/null
@@ -1,606 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, Steve Gargan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: consul
-short_description: "Add, modify & delete services within a consul cluster."
-description:
- - Registers services and checks for an agent with a consul cluster.
- A service is some process running on the agent node that should be advertised by
- consul's discovery mechanism. It may optionally supply a check definition,
- a periodic service test to notify the consul cluster of the service's health.
- - "Checks may also be registered per node e.g. disk usage, or cpu usage and
- notify the health of the entire node to the cluster.
- Service level checks do not require a check name or id as these are derived
- by Consul from the Service name and id respectively by appending 'service:'
- Node level checks require a I(check_name) and optionally a I(check_id)."
- - Currently, there is no complete way to retrieve the script, interval or ttl
- metadata for a registered check. Without this metadata it is not possible to
- tell if the data supplied with ansible represents a change to a check. As a
- result this module does not attempt to determine changes and will always
- report that a change occurred. An API method is planned to supply this
- metadata so at that stage change management will be added.
- - "See U(http://consul.io) for more details."
-requirements:
- - python-consul
- - requests
-author: "Steve Gargan (@sgargan)"
-options:
- state:
- type: str
- description:
- - register or deregister the consul service, defaults to present
- default: present
- choices: ['present', 'absent']
- service_name:
- type: str
- description:
- - Name for the service on a node; must be unique per node. Required
- when registering a service. May be omitted if registering a node
- level check.
- service_id:
- type: str
- description:
- - the ID for the service, must be unique per node. If I(state=absent),
- defaults to the service name if supplied.
- host:
- type: str
- description:
- - host of the consul agent, defaults to localhost
- default: localhost
- port:
- type: int
- description:
- - the port on which the consul agent is running
- default: 8500
- scheme:
- type: str
- description:
- - the protocol scheme on which the consul agent is running
- default: http
- validate_certs:
- description:
- - whether to verify the TLS certificate of the consul agent
- type: bool
- default: 'yes'
- notes:
- type: str
- description:
- - Notes to attach to check when registering it.
- service_port:
- type: int
- description:
- - the port on which the service is listening. Can optionally be supplied for
- registration of a service, i.e. if I(service_name) or I(service_id) is set
- service_address:
- type: str
- description:
- - the address to advertise that the service will be listening on.
- This value will be passed as the I(address) parameter to Consul's
- C(/v1/agent/service/register) API method, so refer to the Consul API
- documentation for further details.
- tags:
- type: list
- elements: str
- description:
- - tags that will be attached to the service registration.
- script:
- type: str
- description:
- - the script/command that will be run periodically to check the health
- of the service. Scripts require I(interval) and vice versa.
- interval:
- type: str
- description:
- - the interval at which the service check will be run. This is a number
- with an C(s) or C(m) suffix to signify the units of seconds or minutes,
- e.g. C(15s) or C(1m). If no suffix is supplied, C(m) will be used by
- default, e.g. C(1) will be C(1m). Required if the I(script) parameter is specified.
- check_id:
- type: str
- description:
- - an ID for the service check. If I(state=absent), defaults to
- I(check_name). Ignored if part of a service definition.
- check_name:
- type: str
- description:
- - a name for the service check. Required if standalone, ignored if
- part of service definition.
- ttl:
- type: str
- description:
- - checks can be registered with a ttl instead of a I(script) and I(interval).
- This means that the service will check in with the agent before the
- ttl expires. If it doesn't, the check will be considered failed.
- Required if registering a check and the I(script) and I(interval) are
- missing. Similar to the interval, this is a number with an C(s) or C(m)
- suffix to signify the units of seconds or minutes, e.g. C(15s) or C(1m).
- If no suffix is supplied, C(m) will be used by default, e.g. C(1) will be C(1m).
- tcp:
- type: str
- description:
- - Checks can be registered with a TCP port. This means that consul
- will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
- The format is C(host:port), for example C(localhost:80).
- I(interval) must also be provided with this option.
- version_added: '1.3.0'
- http:
- type: str
- description:
- - checks can be registered with an HTTP endpoint. This means that consul
- will check that the http endpoint returns a successful HTTP status.
- I(interval) must also be provided with this option.
- timeout:
- type: str
- description:
- - A custom HTTP check timeout. The consul default is 10 seconds.
- Similar to the interval this is a number with a C(s) or C(m) suffix to
- signify the units of seconds or minutes, e.g. C(15s) or C(1m).
- token:
- type: str
- description:
- - the token key identifying an ACL rule set. May be required to register services.
-'''
-
-EXAMPLES = '''
-- name: Register nginx service with the local consul agent
- community.general.consul:
- service_name: nginx
- service_port: 80
-
-- name: Register nginx service with curl check
- community.general.consul:
- service_name: nginx
- service_port: 80
- script: curl http://localhost
- interval: 60s
-
-- name: Register nginx with a TCP check
- community.general.consul:
- service_name: nginx
- service_port: 80
- interval: 60s
- tcp: localhost:80
-
-- name: Register nginx with an http check
- community.general.consul:
- service_name: nginx
- service_port: 80
- interval: 60s
- http: http://localhost:80/status
-
-- name: Register external service nginx available at 10.1.5.23
- community.general.consul:
- service_name: nginx
- service_port: 80
- service_address: 10.1.5.23
-
-- name: Register nginx with some service tags
- community.general.consul:
- service_name: nginx
- service_port: 80
- tags:
- - prod
- - webservers
-
-- name: Remove nginx service
- community.general.consul:
- service_name: nginx
- state: absent
-
-- name: Register celery worker service
- community.general.consul:
- service_name: celery-worker
- tags:
- - prod
- - worker
-
-- name: Create a node level check to test disk usage
- community.general.consul:
- check_name: Disk usage
- check_id: disk_usage
- script: /opt/disk_usage.py
- interval: 5m
-
-- name: Register an http check against a service that's already registered
- community.general.consul:
- check_name: nginx-check2
- check_id: nginx-check2
- service_id: nginx
- interval: 60s
- http: http://localhost:80/morestatus
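-
-# A hypothetical standalone TTL check (illustrative names): the service must
-# report in before the ttl expires or the check is considered failed.
-- name: Register a ttl check
- community.general.consul:
- check_name: nightly-batch
- check_id: nightly-batch
- ttl: 30m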
-'''
-
-try:
- import consul
- from requests.exceptions import ConnectionError
-
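- # python-consul's bundled Agent.Service.deregister() does not forward an
- # ACL token, so this subclass adds one; remove_service() below relies on it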
- class PatchedConsulAgentService(consul.Consul.Agent.Service):
- def deregister(self, service_id, token=None):
- params = {}
- if token:
- params['token'] = token
- return self.agent.http.put(consul.base.CB.bool(),
- '/v1/agent/service/deregister/%s' % service_id,
- params=params)
-
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
-import re
-from ansible.module_utils.basic import AnsibleModule
-
-
-def register_with_consul(module):
- state = module.params.get('state')
-
- if state == 'present':
- add(module)
- else:
- remove(module)
-
-
-def add(module):
- ''' adds a service or a check depending on supplied configuration'''
- check = parse_check(module)
- service = parse_service(module)
-
- if not service and not check:
- module.fail_json(msg='a name and port are required to register a service')
-
- if service:
- if check:
- service.add_check(check)
- add_service(module, service)
- elif check:
- add_check(module, check)
-
-
-def remove(module):
- ''' removes a service or a check '''
- service_id = module.params.get('service_id') or module.params.get('service_name')
- check_id = module.params.get('check_id') or module.params.get('check_name')
- if not (service_id or check_id):
- module.fail_json(msg='services and checks are removed by id or name. please supply a service id/name or a check id/name')
- if service_id:
- remove_service(module, service_id)
- else:
- remove_check(module, check_id)
-
-
-def add_check(module, check):
- ''' registers a check with the given agent. currently there is no way to
- retrieve the full metadata of an existing check through the consul api.
- Without this we can't compare to the supplied check and so we must assume
- a change. '''
- if not check.name and not check.service_id:
- module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
-
- consul_api = get_consul_api(module)
- check.register(consul_api)
-
- module.exit_json(changed=True,
- check_id=check.check_id,
- check_name=check.name,
- script=check.script,
- interval=check.interval,
- ttl=check.ttl,
- tcp=check.tcp,
- http=check.http,
- timeout=check.timeout,
- service_id=check.service_id)
-
-
-def remove_check(module, check_id):
- ''' removes a check using its id '''
- consul_api = get_consul_api(module)
-
- if check_id in consul_api.agent.checks():
- consul_api.agent.check.deregister(check_id)
- module.exit_json(changed=True, id=check_id)
-
- module.exit_json(changed=False, id=check_id)
-
-
-def add_service(module, service):
- ''' registers a service with the current agent '''
- result = service
- changed = False
-
- consul_api = get_consul_api(module)
- existing = get_service_by_id_or_name(consul_api, service.id)
-
- # there is no way to retrieve the details of checks so if a check is present
- # in the service it must be re-registered
- if service.has_checks() or not existing or not existing == service:
-
- service.register(consul_api)
- # check that it registered correctly
- registered = get_service_by_id_or_name(consul_api, service.id)
- if registered:
- result = registered
- changed = True
-
- module.exit_json(changed=changed,
- service_id=result.id,
- service_name=result.name,
- service_port=result.port,
- checks=[check.to_dict() for check in service.checks],
- tags=result.tags)
-
-
-def remove_service(module, service_id):
- ''' deregister a service from the given agent using its service id '''
- consul_api = get_consul_api(module)
- service = get_service_by_id_or_name(consul_api, service_id)
- if service:
- consul_api.agent.service.deregister(service_id, token=module.params.get('token'))
- module.exit_json(changed=True, id=service_id)
-
- module.exit_json(changed=False, id=service_id)
-
-
-def get_consul_api(module):
- consul_client = consul.Consul(host=module.params.get('host'),
- port=module.params.get('port'),
- scheme=module.params.get('scheme'),
- verify=module.params.get('validate_certs'),
- token=module.params.get('token'))
- consul_client.agent.service = PatchedConsulAgentService(consul_client)
- return consul_client
-
-
-def get_service_by_id_or_name(consul_api, service_id_or_name):
- ''' iterate the registered services and find one with the given id '''
- for name, service in consul_api.agent.services().items():
- if service['ID'] == service_id_or_name or service['Service'] == service_id_or_name:
- return ConsulService(loaded=service)
-
-
-def parse_check(module):
- if len([p for p in (module.params.get('script'), module.params.get('ttl'), module.params.get('tcp'), module.params.get('http')) if p]) > 1:
- module.fail_json(
- msg='checks are either script, tcp, http or ttl driven; supplying more than one does not make sense')
-
- if module.params.get('check_id') or module.params.get('script') or module.params.get('ttl') or module.params.get('tcp') or module.params.get('http'):
-
- return ConsulCheck(
- module.params.get('check_id'),
- module.params.get('check_name'),
- module.params.get('check_node'),
- module.params.get('check_host'),
- module.params.get('script'),
- module.params.get('interval'),
- module.params.get('ttl'),
- module.params.get('notes'),
- module.params.get('tcp'),
- module.params.get('http'),
- module.params.get('timeout'),
- module.params.get('service_id'),
- )
-
-
-def parse_service(module):
- if module.params.get('service_name'):
- return ConsulService(
- module.params.get('service_id'),
- module.params.get('service_name'),
- module.params.get('service_address'),
- module.params.get('service_port'),
- module.params.get('tags'),
- )
- elif module.params.get('service_port'):
- # a port without a name is an incomplete service definition; a
- # standalone check (no service at all) is still allowed here
- module.fail_json(msg="service_name is required to configure a service.")
-
-
-class ConsulService(object):
-
- def __init__(self, service_id=None, name=None, address=None, port=-1,
- tags=None, loaded=None):
- self.id = self.name = name
- if service_id:
- self.id = service_id
- self.address = address
- self.port = port
- self.tags = tags
- self.checks = []
- if loaded:
- self.id = loaded['ID']
- self.name = loaded['Service']
- self.port = loaded['Port']
- self.tags = loaded['Tags']
-
- def register(self, consul_api):
- optional = {}
-
- if self.port:
- optional['port'] = self.port
-
- if len(self.checks) > 0:
- optional['check'] = self.checks[0].check
-
- consul_api.agent.service.register(
- self.name,
- service_id=self.id,
- address=self.address,
- tags=self.tags,
- **optional)
-
- def add_check(self, check):
- self.checks.append(check)
-
- def get_checks(self):
- # renamed accessor: the instance attribute self.checks assigned in
- # __init__ would shadow a method of the same name
- return self.checks
-
- def has_checks(self):
- return len(self.checks) > 0
-
- def __eq__(self, other):
- return (isinstance(other, self.__class__) and
- self.id == other.id and
- self.name == other.name and
- self.port == other.port and
- self.tags == other.tags)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def to_dict(self):
- data = {'id': self.id, "name": self.name}
- if self.port:
- data['port'] = self.port
- if self.tags and len(self.tags) > 0:
- data['tags'] = self.tags
- if len(self.checks) > 0:
- data['check'] = self.checks[0].to_dict()
- return data
-
-
-class ConsulCheck(object):
-
- def __init__(self, check_id, name, node=None, host='localhost',
- script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
- self.check_id = self.name = name
- if check_id:
- self.check_id = check_id
- self.service_id = service_id
- self.notes = notes
- self.node = node
- self.host = host
-
- self.interval = self.validate_duration('interval', interval)
- self.ttl = self.validate_duration('ttl', ttl)
- self.script = script
- self.tcp = tcp
- self.http = http
- self.timeout = self.validate_duration('timeout', timeout)
-
- self.check = None
-
- if script:
- self.check = consul.Check.script(script, self.interval)
-
- if ttl:
- self.check = consul.Check.ttl(self.ttl)
-
- if http:
- if interval is None:
- raise Exception('http check must specify interval')
-
- self.check = consul.Check.http(http, self.interval, self.timeout)
-
- if tcp:
- if interval is None:
- raise Exception('tcp check must specify interval')
-
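- # accepts "host:port", including bracketed IPv6 such as "[::1]:8000";
- # the brackets are stripped below before the host is handed to
- # consul.Check.tcp()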
- regex = r"(?P.*)(?::)(?P(?:[0-9]+))$"
- match = re.match(regex, tcp)
-
- if match is None:
- raise Exception('tcp check must be in host:port format')
-
- self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
-
- def validate_duration(self, name, duration):
- if duration:
- duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
- if not any((duration.endswith(suffix) for suffix in duration_units)):
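- # no recognized unit suffix: seconds are assumed, e.g. "10" becomes "10s"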
- duration = "{0}s".format(duration)
- return duration
-
- def register(self, consul_api):
- consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
- notes=self.notes,
- check=self.check)
-
- def __eq__(self, other):
- return (isinstance(other, self.__class__) and
- self.check_id == other.check_id and
- self.service_id == other.service_id and
- self.name == other.name and
- self.script == other.script and
- self.interval == other.interval)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def to_dict(self):
- data = {}
- self._add(data, 'id', attr='check_id')
- self._add(data, 'name', attr='check_name')
- self._add(data, 'script')
- self._add(data, 'node')
- self._add(data, 'notes')
- self._add(data, 'host')
- self._add(data, 'interval')
- self._add(data, 'ttl')
- self._add(data, 'tcp')
- self._add(data, 'http')
- self._add(data, 'timeout')
- self._add(data, 'service_id')
- return data
-
- def _add(self, data, key, attr=None):
- try:
- if attr is None:
- attr = key
- data[key] = getattr(self, attr)
- except Exception:
- pass
-
-
-def test_dependencies(module):
- if not python_consul_installed:
- module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- host=dict(default='localhost'),
- port=dict(default=8500, type='int'),
- scheme=dict(default='http'),
- validate_certs=dict(default=True, type='bool'),
- check_id=dict(),
- check_name=dict(),
- check_node=dict(),
- check_host=dict(),
- notes=dict(),
- script=dict(),
- service_id=dict(),
- service_name=dict(),
- service_address=dict(type='str'),
- service_port=dict(type='int'),
- state=dict(default='present', choices=['present', 'absent']),
- interval=dict(type='str'),
- ttl=dict(type='str'),
- tcp=dict(type='str'),
- http=dict(type='str'),
- timeout=dict(type='str'),
- tags=dict(type='list', elements='str'),
- token=dict(no_log=True)
- ),
- supports_check_mode=False,
- )
-
- test_dependencies(module)
-
- try:
- register_with_consul(module)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- module.params.get('host'), module.params.get('port'), str(e)))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
deleted file mode 100644
index 1e01e58a..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_acl.py
+++ /dev/null
@@ -1,683 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, Steve Gargan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: consul_acl
-short_description: Manipulate Consul ACL keys and rules
-description:
- - Allows the addition, modification and deletion of ACL keys and associated
- rules in a consul cluster via the agent. For more details on using and
- configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
-author:
- - Steve Gargan (@sgargan)
- - Colin Nolan (@colin-nolan)
-options:
- mgmt_token:
- description:
- - a management token is required to manipulate the acl lists
- required: true
- type: str
- state:
- description:
- - whether the ACL pair should be present or absent
- required: false
- choices: ['present', 'absent']
- default: present
- type: str
- token_type:
- description:
- - the type of token that should be created
- choices: ['client', 'management']
- default: client
- type: str
- name:
- description:
- - the name that should be associated with the ACL key; this is opaque
- to Consul
- required: false
- type: str
- token:
- description:
- - the token key identifying an ACL rule set. If generated by consul
- this will be a UUID
- required: false
- type: str
- rules:
- type: list
- elements: dict
- description:
- - rules that should be associated with a given token
- required: false
- host:
- description:
- - host of the consul agent; defaults to localhost
- required: false
- default: localhost
- type: str
- port:
- type: int
- description:
- - the port on which the consul agent is running
- required: false
- default: 8500
- scheme:
- description:
- - the protocol scheme on which the consul agent is running
- required: false
- default: http
- type: str
- validate_certs:
- type: bool
- description:
- - whether to verify the TLS certificate of the consul agent
- required: false
- default: True
-requirements:
- - python-consul
- - pyhcl
- - requests
-'''
-
-EXAMPLES = """
-- name: Create an ACL with rules
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- rules:
- - key: "foo"
- policy: read
- - key: "private/foo"
- policy: deny
-
-- name: Create an ACL with a specific token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- token: my-token
- rules:
- - key: "foo"
- policy: read
-
-- name: Update the rules associated to an ACL token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- name: Foo access
- token: some_client_token
- rules:
- - event: "bbq"
- policy: write
- - key: "foo"
- policy: read
- - key: "private"
- policy: deny
- - keyring: write
- - node: "hgs4"
- policy: write
- - operator: read
- - query: ""
- policy: write
- - service: "consul"
- policy: write
- - session: "standup"
- policy: write
-
-- name: Remove a token
- community.general.consul_acl:
- host: consul1.example.com
- mgmt_token: some_management_acl
- token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
- state: absent
-"""
-
-RETURN = """
-token:
- description: the token associated to the ACL (the ACL's ID)
- returned: success
- type: str
- sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
-rules:
- description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
- Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
- returned: I(state) == "present"
- type: str
- sample: {
- "key": {
- "foo": {
- "policy": "write"
- },
- "bar": {
- "policy": "deny"
- }
- }
- }
-operation:
- description: the operation performed on the ACL
- returned: changed
- type: str
- sample: update
-"""
-
-
-try:
- import consul
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
-try:
- import hcl
- pyhcl_installed = True
-except ImportError:
- pyhcl_installed = False
-
-try:
- from requests.exceptions import ConnectionError
- has_requests = True
-except ImportError:
- has_requests = False
-
-from collections import defaultdict
-from ansible.module_utils.basic import to_text, AnsibleModule
-
-
-RULE_SCOPES = [
- "agent",
- "agent_prefix",
- "event",
- "event_prefix",
- "key",
- "key_prefix",
- "keyring",
- "node",
- "node_prefix",
- "operator",
- "query",
- "query_prefix",
- "service",
- "service_prefix",
- "session",
- "session_prefix",
-]
-
-MANAGEMENT_PARAMETER_NAME = "mgmt_token"
-HOST_PARAMETER_NAME = "host"
-SCHEME_PARAMETER_NAME = "scheme"
-VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
-NAME_PARAMETER_NAME = "name"
-PORT_PARAMETER_NAME = "port"
-RULES_PARAMETER_NAME = "rules"
-STATE_PARAMETER_NAME = "state"
-TOKEN_PARAMETER_NAME = "token"
-TOKEN_TYPE_PARAMETER_NAME = "token_type"
-
-PRESENT_STATE_VALUE = "present"
-ABSENT_STATE_VALUE = "absent"
-
-CLIENT_TOKEN_TYPE_VALUE = "client"
-MANAGEMENT_TOKEN_TYPE_VALUE = "management"
-
-REMOVE_OPERATION = "remove"
-UPDATE_OPERATION = "update"
-CREATE_OPERATION = "create"
-
-_POLICY_JSON_PROPERTY = "policy"
-_RULES_JSON_PROPERTY = "Rules"
-_TOKEN_JSON_PROPERTY = "ID"
-_TOKEN_TYPE_JSON_PROPERTY = "Type"
-_NAME_JSON_PROPERTY = "Name"
-_POLICY_YML_PROPERTY = "policy"
-_POLICY_HCL_PROPERTY = "policy"
-
-_ARGUMENT_SPEC = {
- MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
- HOST_PARAMETER_NAME: dict(default='localhost'),
- SCHEME_PARAMETER_NAME: dict(default='http'),
- VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
- NAME_PARAMETER_NAME: dict(),
- PORT_PARAMETER_NAME: dict(default=8500, type='int'),
- RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
- STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
- TOKEN_PARAMETER_NAME: dict(no_log=False),
- TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
- default=CLIENT_TOKEN_TYPE_VALUE)
-}
-
-
-def set_acl(consul_client, configuration):
- """
- Sets an ACL based on the given configuration.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of setting the ACL
- """
- acls_as_json = decode_acls_as_json(consul_client.acl.list())
- existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
- existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
- if None in existing_acls_mapped_by_token:
- raise AssertionError("expecting ACL list to be associated to a token: %s" %
- existing_acls_mapped_by_token[None])
-
- if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
- # No token but name given so can get token from name
- configuration.token = existing_acls_mapped_by_name[configuration.name].token
-
- if configuration.token and configuration.token in existing_acls_mapped_by_token:
- return update_acl(consul_client, configuration)
- else:
- if configuration.token in existing_acls_mapped_by_token:
- raise AssertionError()
- if configuration.name in existing_acls_mapped_by_name:
- raise AssertionError()
- return create_acl(consul_client, configuration)
-
-
-def update_acl(consul_client, configuration):
- """
- Updates an ACL.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of the update
- """
- existing_acl = load_acl_with_token(consul_client, configuration.token)
- changed = existing_acl.rules != configuration.rules
-
- if changed:
- name = configuration.name if configuration.name is not None else existing_acl.name
- rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
- updated_token = consul_client.acl.update(
- configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
- if updated_token != configuration.token:
- raise AssertionError()
-
- return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
-
-
-def create_acl(consul_client, configuration):
- """
- Creates an ACL.
- :param consul_client: the consul client
- :param configuration: the run configuration
- :return: the output of the creation
- """
- rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
- token = consul_client.acl.create(
- name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
- rules = configuration.rules
- return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
-
-
-def remove_acl(consul, configuration):
- """
- Removes an ACL.
- :param consul: the consul client
- :param configuration: the run configuration
- :return: the output of the removal
- """
- token = configuration.token
- changed = consul.acl.info(token) is not None
- if changed:
- consul.acl.destroy(token)
- return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
-
-
-def load_acl_with_token(consul, token):
- """
- Loads the ACL with the given token (token == rule ID).
- :param consul: the consul client
- :param token: the ACL "token"/ID (not name)
- :return: the ACL associated to the given token
- :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
- """
- acl_as_json = consul.acl.info(token)
- if acl_as_json is None:
- raise ConsulACLNotFoundException(token)
- return decode_acl_as_json(acl_as_json)
-
-
-def encode_rules_as_hcl_string(rules):
- """
- Converts the given rules into the equivalent HCL (string) representation.
- :param rules: the rules
- :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
- note for justification)
- """
- if len(rules) == 0:
- # Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rule` property will be an empty
- # string if there are no rules...
- return None
- rules_as_hcl = ""
- for rule in rules:
- rules_as_hcl += encode_rule_as_hcl_string(rule)
- return rules_as_hcl
-
-
-def encode_rule_as_hcl_string(rule):
- """
- Converts the given rule into the equivalent HCL (string) representation.
- :param rule: the rule
- :return: the equivalent HCL (string) representation of the rule
- """
- if rule.pattern is not None:
- return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
- else:
- return '%s = "%s"\n' % (rule.scope, rule.policy)
-
-
-def decode_rules_as_hcl_string(rules_as_hcl):
- """
- Converts the given HCL (string) representation of rules into a list of rule domain models.
- :param rules_as_hcl: the HCL (string) representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules_as_hcl = to_text(rules_as_hcl)
- rules_as_json = hcl.loads(rules_as_hcl)
- return decode_rules_as_json(rules_as_json)
-
-
-def decode_rules_as_json(rules_as_json):
- """
- Converts the given JSON representation of rules into a list of rule domain models.
- :param rules_as_json: the JSON representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules = RuleCollection()
- for scope in rules_as_json:
- if not isinstance(rules_as_json[scope], dict):
- rules.add(Rule(scope, rules_as_json[scope]))
- else:
- for pattern, policy in rules_as_json[scope].items():
- rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
- return rules
-
-
-def encode_rules_as_json(rules):
- """
- Converts the given rules into the equivalent JSON representation according to the documentation:
- https://www.consul.io/docs/guides/acl.html#rule-specification.
- :param rules: the rules
- :return: JSON representation of the given rules
- """
- rules_as_json = defaultdict(dict)
- for rule in rules:
- if rule.pattern is not None:
- if rule.pattern in rules_as_json[rule.scope]:
- raise AssertionError()
- rules_as_json[rule.scope][rule.pattern] = {
- _POLICY_JSON_PROPERTY: rule.policy
- }
- else:
- if rule.scope in rules_as_json:
- raise AssertionError()
- rules_as_json[rule.scope] = rule.policy
- return rules_as_json
-
-
-def decode_rules_as_yml(rules_as_yml):
- """
- Converts the given YAML representation of rules into a list of rule domain models.
- :param rules_as_yml: the YAML representation of a collection of rules
- :return: the equivalent domain model to the given rules
- """
- rules = RuleCollection()
- if rules_as_yml:
- for rule_as_yml in rules_as_yml:
- rule_added = False
- for scope in RULE_SCOPES:
- if scope in rule_as_yml:
- if rule_as_yml[scope] is None:
- raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
- policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
- else rule_as_yml[scope]
- pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
- rules.add(Rule(scope, policy, pattern))
- rule_added = True
- break
- if not rule_added:
- raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
- return rules
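-
-# For illustration: a rules entry such as {key: "foo", policy: read} decodes
-# to Rule("key", "read", "foo"), while an entry like {keyring: write} (no
-# explicit policy key) decodes to Rule("keyring", "write") with no pattern.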
-
-
-def decode_acl_as_json(acl_as_json):
- """
- Converts the given JSON representation of an ACL into the equivalent domain model.
- :param acl_as_json: the JSON representation of an ACL
- :return: the equivalent domain model to the given ACL
- """
- rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
- rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
- else RuleCollection()
- return ACL(
- rules=rules,
- token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
- token=acl_as_json[_TOKEN_JSON_PROPERTY],
- name=acl_as_json[_NAME_JSON_PROPERTY]
- )
-
-
-def decode_acls_as_json(acls_as_json):
- """
- Converts the given JSON representation of ACLs into a list of ACL domain models.
- :param acls_as_json: the JSON representation of a collection of ACLs
- :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
- """
- return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
-
-
-class ConsulACLNotFoundException(Exception):
- """
- Exception raised if an ACL with the given token is not found.
- """
-
-
-class Configuration:
- """
- Configuration for this module.
- """
-
- def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
- rules=None, state=None, token=None, token_type=None):
- self.management_token = management_token # type: str
- self.host = host # type: str
- self.scheme = scheme # type: str
- self.validate_certs = validate_certs # type: bool
- self.name = name # type: str
- self.port = port # type: int
- self.rules = rules # type: RuleCollection
- self.state = state # type: str
- self.token = token # type: str
- self.token_type = token_type # type: str
-
-
-class Output:
- """
- Output of an action of this module.
- """
-
- def __init__(self, changed=None, token=None, rules=None, operation=None):
- self.changed = changed # type: bool
- self.token = token # type: str
- self.rules = rules # type: RuleCollection
- self.operation = operation # type: str
-
-
-class ACL:
- """
- Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
- """
-
- def __init__(self, rules, token_type, token, name):
- self.rules = rules
- self.token_type = token_type
- self.token = token
- self.name = name
-
- def __eq__(self, other):
- return other \
- and isinstance(other, self.__class__) \
- and self.rules == other.rules \
- and self.token_type == other.token_type \
- and self.token == other.token \
- and self.name == other.name
-
- def __hash__(self):
- return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
-
-
-class Rule:
- """
- ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
- """
-
- def __init__(self, scope, policy, pattern=None):
- self.scope = scope
- self.policy = policy
- self.pattern = pattern
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) \
- and self.scope == other.scope \
- and self.policy == other.policy \
- and self.pattern == other.pattern
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __hash__(self):
- return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
-
- def __str__(self):
- return encode_rule_as_hcl_string(self)
-
-
-class RuleCollection:
- """
- Collection of ACL rules, which are part of a Consul ACL.
- """
-
- def __init__(self):
- self._rules = {}
- for scope in RULE_SCOPES:
- self._rules[scope] = {}
-
- def __iter__(self):
- all_rules = []
- for scope, pattern_keyed_rules in self._rules.items():
- for pattern, rule in pattern_keyed_rules.items():
- all_rules.append(rule)
- return iter(all_rules)
-
- def __len__(self):
- count = 0
- for scope in RULE_SCOPES:
- count += len(self._rules[scope])
- return count
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) \
- and set(self) == set(other)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __str__(self):
- return encode_rules_as_hcl_string(self)
-
- def add(self, rule):
- """
- Adds the given rule to this collection.
- :param rule: model of a rule
- :raises ValueError: raised if there already exists a rule for a given scope and pattern
- """
- if rule.pattern in self._rules[rule.scope]:
- pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
- raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
- self._rules[rule.scope][rule.pattern] = rule
-
-
-def get_consul_client(configuration):
- """
- Gets a Consul client for the given configuration.
-
- Does not check if the Consul client can connect.
- :param configuration: the run configuration
- :return: Consul client
- """
- token = configuration.management_token
- if token is None:
- token = configuration.token
- if token is None:
- raise AssertionError("Expecting the management token to always be set")
- return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
- verify=configuration.validate_certs, token=token)
-
-
-def check_dependencies():
- """
- Checks that the required dependencies have been imported.
- :exception ImportError: if it is detected that any of the required dependencies have not been imported
- """
- if not python_consul_installed:
- raise ImportError("python-consul required for this module. "
- "See: https://python-consul.readthedocs.io/en/latest/#installation")
-
- if not pyhcl_installed:
- raise ImportError("pyhcl required for this module. "
- "See: https://pypi.org/project/pyhcl/")
-
- if not has_requests:
- raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
-
-
-def main():
- """
- Main method.
- """
- module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
-
- try:
- check_dependencies()
- except ImportError as e:
- module.fail_json(msg=str(e))
-
- configuration = Configuration(
- management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
- host=module.params.get(HOST_PARAMETER_NAME),
- scheme=module.params.get(SCHEME_PARAMETER_NAME),
- validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
- name=module.params.get(NAME_PARAMETER_NAME),
- port=module.params.get(PORT_PARAMETER_NAME),
- rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
- state=module.params.get(STATE_PARAMETER_NAME),
- token=module.params.get(TOKEN_PARAMETER_NAME),
- token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
- )
- consul_client = get_consul_client(configuration)
-
- try:
- if configuration.state == PRESENT_STATE_VALUE:
- output = set_acl(consul_client, configuration)
- else:
- output = remove_acl(consul_client, configuration)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- configuration.host, configuration.port, str(e)))
- raise
-
- return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
- if output.rules is not None:
- return_values["rules"] = encode_rules_as_json(output.rules)
- module.exit_json(**return_values)
-
-
-if __name__ == "__main__":
- main()
diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
deleted file mode 100644
index f7b33b85..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_kv.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, Steve Gargan
-# (c) 2018 Genome Research Ltd.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: consul_kv
-short_description: Manipulate entries in the key/value store of a consul cluster
-description:
- - Allows the retrieval, addition, modification and deletion of key/value entries in a
- consul cluster via the agent. The entire contents of the record, including
- the indices, flags and session are returned as C(value).
- - If the C(key) represents a prefix, note that when a value is removed, the existing
- value, if any, is returned as part of the results.
- - See http://www.consul.io/docs/agent/http.html#kv for more details.
-requirements:
- - python-consul
- - requests
-author:
- - Steve Gargan (@sgargan)
- - Colin Nolan (@colin-nolan)
-options:
- state:
- description:
- - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key
- contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was
- different to the current contents. If the state is C(present) and I(value) is not set, the existing value
- associated to the key will be returned. The state C(absent) will remove the key/value pair;
- again, C(changed) will be set to C(true) only if the key actually existed
- prior to the removal. An attempt can be made to obtain or free the
- lock associated with a key/value pair with the states C(acquire) or
- C(release) respectively. A valid session must be supplied to make the
- attempt; C(changed) will be C(true) if the attempt is successful, C(false)
- otherwise.
- type: str
- choices: [ absent, acquire, present, release ]
- default: present
- key:
- description:
- - The key at which the value should be stored.
- type: str
- required: yes
- value:
- description:
- - The value should be associated with the given key, required if C(state)
- is C(present).
- type: str
- recurse:
- description:
- - If the key represents a prefix, each entry with the prefix can be
- retrieved by setting this to C(yes).
- type: bool
- retrieve:
- description:
- - If the I(state) is C(present) and I(value) is set, perform a
- read after setting the value and return this value.
- default: True
- type: bool
- session:
- description:
- - The session that should be used to acquire or release a lock
- associated with a key/value pair.
- type: str
- token:
- description:
- - The token key identifying an ACL rule set that controls access to
- the key value pair
- type: str
- cas:
- description:
- - Used when acquiring a lock with a session. If the C(cas) is C(0), then
- Consul will only put the key if it does not already exist. If the
- C(cas) value is non-zero, then the key is only set if the index matches
- the ModifyIndex of that key.
- type: str
- flags:
- description:
- - Opaque positive integer value that can be passed when setting a value.
- type: str
- host:
- description:
- - Host of the consul agent.
- type: str
- default: localhost
- port:
- description:
- - The port on which the consul agent is running.
- type: int
- default: 8500
- scheme:
- description:
- - The protocol scheme on which the consul agent is running.
- type: str
- default: http
- validate_certs:
- description:
- - Whether to verify the TLS certificate of the consul agent.
- type: bool
- default: 'yes'
-'''
-
-
-EXAMPLES = '''
-# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
-# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
-- name: Retrieve a value from the key/value store
- community.general.consul_kv:
- key: somekey
- register: retrieved_key
-
-- name: Add or update the value associated with a key in the key/value store
- community.general.consul_kv:
- key: somekey
- value: somevalue
-
-- name: Remove a key from the store
- community.general.consul_kv:
- key: somekey
- state: absent
-
-- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
- community.general.consul_kv:
- key: ansible/groups/dc1/somenode
- value: top_secret
-
-- name: Register a key/value pair with an associated session
- community.general.consul_kv:
- key: stg/node/server_birthday
- value: 20160509
- session: "{{ sessionid }}"
- state: acquire
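-
-# Hypothetical use of cas: with C(cas=0), Consul only writes the key if it
-# does not already exist (see the cas option above).
-- name: Create a key only if it does not already exist
- community.general.consul_kv:
- key: somekey
- value: somevalue
- cas: '0'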
-'''
-
-from ansible.module_utils.common.text.converters import to_text
-
-try:
- import consul
- from requests.exceptions import ConnectionError
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
-# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequent API call,
-# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
-NOT_SET = None
-
-
-def _has_value_changed(consul_client, key, target_value):
- """
- Uses the given Consul client to determine if the value associated to the given key is different to the given target
- value.
- :param consul_client: Consul connected client
- :param key: key in Consul
- :param target_value: value to be associated to the key
- :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
- value has changed (i.e. the stored value is not the target value)
- """
- index, existing = consul_client.kv.get(key)
- if not existing:
- return index, True
- try:
- changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
- return index, changed
- except UnicodeError:
- # Existing value was not decodable but all values we set are valid utf-8
- return index, True
-
-
-def execute(module):
- state = module.params.get('state')
-
- if state == 'acquire' or state == 'release':
- lock(module, state)
- elif state == 'present':
- if module.params.get('value') is NOT_SET:
- get_value(module)
- else:
- set_value(module)
- elif state == 'absent':
- remove_value(module)
- else:
- module.exit_json(msg="Unsupported state: %s" % (state, ))
-
-
-def lock(module, state):
-
- consul_api = get_consul_api(module)
-
- session = module.params.get('session')
- key = module.params.get('key')
- value = module.params.get('value')
-
- if not session:
- module.fail_json(
- msg='%s of lock for %s requested but no session supplied' %
- (state, key))
-
- index, changed = _has_value_changed(consul_api, key, value)
-
- if changed and not module.check_mode:
- if state == 'acquire':
- changed = consul_api.kv.put(key, value,
- cas=module.params.get('cas'),
- acquire=session,
- flags=module.params.get('flags'))
- else:
- changed = consul_api.kv.put(key, value,
- cas=module.params.get('cas'),
- release=session,
- flags=module.params.get('flags'))
-
- module.exit_json(changed=changed,
- index=index,
- key=key)
-
-
-def get_value(module):
- consul_api = get_consul_api(module)
- key = module.params.get('key')
-
- index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
-
- module.exit_json(changed=False, index=index, data=existing_value)
-
-
-def set_value(module):
- consul_api = get_consul_api(module)
-
- key = module.params.get('key')
- value = module.params.get('value')
-
- if value is NOT_SET:
- raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
-
- index, changed = _has_value_changed(consul_api, key, value)
-
- if changed and not module.check_mode:
- changed = consul_api.kv.put(key, value,
- cas=module.params.get('cas'),
- flags=module.params.get('flags'))
-
- stored = None
- if module.params.get('retrieve'):
- index, stored = consul_api.kv.get(key)
-
- module.exit_json(changed=changed,
- index=index,
- key=key,
- data=stored)
-
-
-def remove_value(module):
- ''' remove the value associated with the given key. if the recurse parameter
- is set then any key prefixed with the given key will be removed. '''
- consul_api = get_consul_api(module)
-
- key = module.params.get('key')
-
- index, existing = consul_api.kv.get(
- key, recurse=module.params.get('recurse'))
-
- changed = existing is not None
- if changed and not module.check_mode:
- consul_api.kv.delete(key, module.params.get('recurse'))
-
- module.exit_json(changed=changed,
- index=index,
- key=key,
- data=existing)
-
-
-def get_consul_api(module):
- return consul.Consul(host=module.params.get('host'),
- port=module.params.get('port'),
- scheme=module.params.get('scheme'),
- verify=module.params.get('validate_certs'),
- token=module.params.get('token'))
-
-
-def test_dependencies(module):
- if not python_consul_installed:
- module.fail_json(msg="python-consul required for this module. "
- "see https://python-consul.readthedocs.io/en/latest/#installation")
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- cas=dict(type='str'),
- flags=dict(type='str'),
- key=dict(type='str', required=True, no_log=False),
- host=dict(type='str', default='localhost'),
- scheme=dict(type='str', default='http'),
- validate_certs=dict(type='bool', default=True),
- port=dict(type='int', default=8500),
- recurse=dict(type='bool'),
- retrieve=dict(type='bool', default=True),
- state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
- token=dict(type='str', no_log=True),
- value=dict(type='str', default=NOT_SET),
- session=dict(type='str'),
- ),
- supports_check_mode=True
- )
-
- test_dependencies(module)
-
- try:
- execute(module)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- module.params.get('host'), module.params.get('port'), e))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py b/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
deleted file mode 100644
index 7ace1f89..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/consul/consul_session.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Steve Gargan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: consul_session
-short_description: Manipulate consul sessions
-description:
- - Allows the addition, modification and deletion of sessions in a consul
- cluster. These sessions can then be used in conjunction with key value pairs
- to implement distributed locks. In depth documentation for working with
- sessions can be found at http://www.consul.io/docs/internals/sessions.html
-requirements:
- - python-consul
- - requests
-author:
-- Steve Gargan (@sgargan)
-options:
- id:
- description:
- - ID of the session, required when I(state) is either C(info) or
- C(absent).
- type: str
- state:
- description:
- - Whether the session should be present, i.e. created if it doesn't
- exist, or absent, i.e. removed if present. If created, the I(id) for the
- session is returned in the output. If C(absent), I(id) is
- required to remove the session. Info for a single session, all the
- sessions for a node or all available sessions can be retrieved by
- specifying C(info), C(node) or C(list) for the I(state); for C(node)
- or C(info), the node I(name) or session I(id) is required as parameter.
- choices: [ absent, info, list, node, present ]
- type: str
- default: present
- name:
- description:
- - The name that should be associated with the session. Required when
- I(state=node) is used.
- type: str
- delay:
- description:
- - The optional lock delay that can be attached to the session when it
- is created. Locks for invalidated sessions are blocked from being
- acquired until this delay has expired. Durations are in seconds.
- type: int
- default: 15
- node:
- description:
- - The name of the node that with which the session will be associated.
- by default this is the name of the agent.
- type: str
- datacenter:
- description:
- - The name of the datacenter in which the session exists or should be
- created.
- type: str
- checks:
- description:
- - Checks that will be used to verify the session health. If
- all the checks fail, the session will be invalidated and any locks
- associated with the session will be released and can be acquired once
- the associated lock delay has expired.
- type: list
- elements: str
- host:
- description:
- - The host of the consul agent defaults to localhost.
- type: str
- default: localhost
- port:
- description:
- - The port on which the consul agent is running.
- type: int
- default: 8500
- scheme:
- description:
- - The protocol scheme on which the consul agent is running.
- type: str
- default: http
- validate_certs:
- description:
- - Whether to verify the TLS certificate of the consul agent.
- type: bool
- default: True
- behavior:
- description:
- - The optional behavior that can be attached to the session when it
- is created. This controls the behavior when a session is invalidated.
- choices: [ delete, release ]
- type: str
- default: release
-'''
-
-EXAMPLES = '''
-- name: Register basic session with consul
- community.general.consul_session:
- name: session1
-
-- name: Register a session with an existing check
- community.general.consul_session:
- name: session_with_check
- checks:
- - existing_check_name
-
-- name: Register a session with lock_delay
- community.general.consul_session:
- name: session_with_delay
- delay: 20
-
-- name: Retrieve info about session by id
- community.general.consul_session:
- id: session_id
- state: info
-
-- name: Retrieve active sessions
- community.general.consul_session:
- state: list
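-
-# Hypothetical session id; C(state=absent) requires I(id).
-- name: Remove a session by its id
- community.general.consul_session:
- id: session_id
- state: absent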
-'''
-
-try:
- import consul
- from requests.exceptions import ConnectionError
- python_consul_installed = True
-except ImportError:
- python_consul_installed = False
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def execute(module):
-
- state = module.params.get('state')
-
- if state in ['info', 'list', 'node']:
- lookup_sessions(module)
- elif state == 'present':
- update_session(module)
- else:
- remove_session(module)
-
-
-def lookup_sessions(module):
-
- datacenter = module.params.get('datacenter')
-
- state = module.params.get('state')
- consul_client = get_consul_api(module)
- try:
- if state == 'list':
- sessions_list = consul_client.session.list(dc=datacenter)
- # Ditch the index, this can be grabbed from the results
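- # (python-consul's session.list() returns an (index, sessions) tuple,
- # so element [1] is the session payload itself)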
- if sessions_list and len(sessions_list) >= 2:
- sessions_list = sessions_list[1]
- module.exit_json(changed=True,
- sessions=sessions_list)
- elif state == 'node':
- node = module.params.get('node')
- sessions = consul_client.session.node(node, dc=datacenter)
- module.exit_json(changed=True,
- node=node,
- sessions=sessions)
- elif state == 'info':
- session_id = module.params.get('id')
-
- session_by_id = consul_client.session.info(session_id, dc=datacenter)
- module.exit_json(changed=True,
- session_id=session_id,
- sessions=session_by_id)
-
- except Exception as e:
- module.fail_json(msg="Could not retrieve session info %s" % e)
-
-
-def update_session(module):
-
- name = module.params.get('name')
- delay = module.params.get('delay')
- checks = module.params.get('checks')
- datacenter = module.params.get('datacenter')
- node = module.params.get('node')
- behavior = module.params.get('behavior')
-
- consul_client = get_consul_api(module)
-
- try:
- session = consul_client.session.create(
- name=name,
- behavior=behavior,
- node=node,
- lock_delay=delay,
- dc=datacenter,
- checks=checks
- )
- module.exit_json(changed=True,
- session_id=session,
- name=name,
- behavior=behavior,
- delay=delay,
- checks=checks,
- node=node)
- except Exception as e:
- module.fail_json(msg="Could not create/update session %s" % e)
-
-
-def remove_session(module):
- session_id = module.params.get('id')
-
- consul_client = get_consul_api(module)
-
- try:
- consul_client.session.destroy(session_id)
-
- module.exit_json(changed=True,
- session_id=session_id)
- except Exception as e:
- module.fail_json(msg="Could not remove session with id '%s' %s" % (
- session_id, e))
-
-
-def get_consul_api(module):
- return consul.Consul(host=module.params.get('host'),
- port=module.params.get('port'),
- scheme=module.params.get('scheme'),
- verify=module.params.get('validate_certs'))
-
-
-def test_dependencies(module):
- if not python_consul_installed:
- module.fail_json(msg="python-consul required for this module. "
- "see https://python-consul.readthedocs.io/en/latest/#installation")
-
-
-def main():
- argument_spec = dict(
- checks=dict(type='list', elements='str'),
- delay=dict(type='int', default=15),
- behavior=dict(type='str', default='release', choices=['release', 'delete']),
- host=dict(type='str', default='localhost'),
- port=dict(type='int', default=8500),
- scheme=dict(type='str', default='http'),
- validate_certs=dict(type='bool', default=True),
- id=dict(type='str'),
- name=dict(type='str'),
- node=dict(type='str'),
- state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
- datacenter=dict(type='str'),
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- required_if=[
- ('state', 'node', ['name']),
- ('state', 'info', ['id']),
- ('state', 'absent', ['id']),
- ],
- supports_check_mode=False
- )
-
- test_dependencies(module)
-
- try:
- execute(module)
- except ConnectionError as e:
- module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
- module.params.get('host'), module.params.get('port'), e))
- except Exception as e:
- module.fail_json(msg=str(e))
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/community/general/plugins/modules/clustering/etcd3.py b/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
deleted file mode 100644
index 6a095133..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/etcd3.py
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, Jean-Philippe Evrard
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: etcd3
-short_description: "Set or delete key value pairs from an etcd3 cluster"
-requirements:
- - etcd3
-description:
- - Sets or deletes values in an etcd3 cluster using its v3 API.
- - Needs the Python etcd3 library to work.
-options:
- key:
- type: str
- description:
- - the key where the information is stored in the cluster
- required: true
- value:
- type: str
- description:
- - the information stored
- required: true
- host:
- type: str
- description:
- - the IP address of the cluster
- default: 'localhost'
- port:
- type: int
- description:
- - the port number used to connect to the cluster
- default: 2379
- state:
- type: str
- description:
- - the state of the value for the key.
- - can be present or absent
- required: true
- choices: [ present, absent ]
- user:
- type: str
- description:
- - The etcd user to authenticate with.
- password:
- type: str
- description:
- - The password to use for authentication.
- - Required if I(user) is defined.
- ca_cert:
- type: path
- description:
- - The Certificate Authority to use to verify the etcd host.
- - Required if I(client_cert) and I(client_key) are defined.
- client_cert:
- type: path
- description:
- - PEM formatted certificate chain file to be used for SSL client authentication.
- - Required if I(client_key) is defined.
- client_key:
- type: path
- description:
- - PEM formatted file that contains your private key to be used for SSL client authentication.
- - Required if I(client_cert) is defined.
- timeout:
- type: int
- description:
- - The socket level timeout in seconds.
-author:
- - Jean-Philippe Evrard (@evrardjp)
- - Victor Fauth (@vfauth)
-'''
-
-EXAMPLES = """
-- name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379"
- community.general.etcd3:
- key: "foo"
- value: "baz3"
- host: "localhost"
- port: 2379
- state: "present"
-
-- name: Authenticate using user/password combination with a timeout of 10 seconds
- community.general.etcd3:
- key: "foo"
- value: "baz3"
- state: "present"
- user: "someone"
- password: "password123"
- timeout: 10
-
-- name: Authenticate using TLS certificates
- community.general.etcd3:
- key: "foo"
- value: "baz3"
- state: "present"
- ca_cert: "/etc/ssl/certs/CA_CERT.pem"
- client_cert: "/etc/ssl/certs/cert.crt"
- client_key: "/etc/ssl/private/key.pem"
-"""
-
-RETURN = '''
-key:
- description: The key that was queried
- returned: always
- type: str
-old_value:
- description: The previous value in the cluster
- returned: always
- type: str
-'''
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-
-try:
- import etcd3
- HAS_ETCD = True
-except ImportError:
- ETCD_IMP_ERR = traceback.format_exc()
- HAS_ETCD = False
-
-
-def run_module():
- # define the available arguments/parameters that a user can pass to
- # the module
- module_args = dict(
- key=dict(type='str', required=True, no_log=False),
- value=dict(type='str', required=True),
- host=dict(type='str', default='localhost'),
- port=dict(type='int', default=2379),
- state=dict(type='str', required=True, choices=['present', 'absent']),
- user=dict(type='str'),
- password=dict(type='str', no_log=True),
- ca_cert=dict(type='path'),
- client_cert=dict(type='path'),
- client_key=dict(type='path'),
- timeout=dict(type='int'),
- )
-
- # seed the result dict in the object
- # we primarily care about changed and state
- # change is if this module effectively modified the target
- # state will include any data that you want your module to pass back
- # for consumption, for example, in a subsequent task
- result = dict(
- changed=False,
- )
-
- # the AnsibleModule object will be our abstraction working with Ansible
- # this includes instantiation, a couple of common attr would be the
- # args/params passed to the execution, as well as if the module
- # supports check mode
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True,
- required_together=[['client_cert', 'client_key'], ['user', 'password']],
- )
-
- # It is possible to set `ca_cert` to verify the server identity without
- # setting `client_cert` or `client_key` to authenticate the client
- # so required_together is enough
- # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
- # of either `client_cert` or `client_key` is enough
- if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
- module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
-
- result['key'] = module.params.get('key')
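- # etcd3.client() expects cert_cert/cert_key rather than the module's
- # client_cert/client_key parameter names, so rename them before building
- # the client below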
- module.params['cert_cert'] = module.params.pop('client_cert')
- module.params['cert_key'] = module.params.pop('client_key')
-
- if not HAS_ETCD:
- module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
-
- allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
- 'timeout', 'user', 'password']
- # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
- # the minimum supported version
- # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
- client_params = dict()
- for key, value in module.params.items():
- if key in allowed_keys:
- client_params[key] = value
- try:
- etcd = etcd3.client(**client_params)
- except Exception as exp:
- module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
- exception=traceback.format_exc())
- try:
- cluster_value = etcd.get(module.params['key'])
- except Exception as exp:
- module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
- exception=traceback.format_exc())
-
- # Make the cluster_value[0] a string for string comparisons
- result['old_value'] = to_native(cluster_value[0])
-
- if module.params['state'] == 'absent':
- if cluster_value[0] is not None:
- if module.check_mode:
- result['changed'] = True
- else:
- try:
- etcd.delete(module.params['key'])
- except Exception as exp:
- module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
- exception=traceback.format_exc())
- else:
- result['changed'] = True
- elif module.params['state'] == 'present':
- if result['old_value'] != module.params['value']:
- if module.check_mode:
- result['changed'] = True
- else:
- try:
- etcd.put(module.params['key'], module.params['value'])
- except Exception as exp:
- module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
- exception=traceback.format_exc())
- else:
- result['changed'] = True
- else:
- module.fail_json(msg="State not recognized")
-
- # manipulate or modify the state as needed (this is going to be the
- # part where your module will do what it needs to do)
-
- # during the execution of the module, if there is an exception or a
- # conditional state that effectively causes a failure, run
- # AnsibleModule.fail_json() to pass in the message and the result
-
- # in the event of a successful module execution, you will want to
- # simply call AnsibleModule.exit_json(), passing the key/value results
- module.exit_json(**result)
-
-
-def main():
- run_module()
-
-
-if __name__ == '__main__':
- main()
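For reference, the module above boils down to a few python-etcd3 calls; a minimal sketch outside Ansible, assuming a local, unauthenticated cluster:

import etcd3

etcd = etcd3.client(host='localhost', port=2379)
value, meta = etcd.get('foo')      # returns (None, None) if the key is absent
if value != b'baz3':
    etcd.put('foo', 'baz3')        # what state=present does
# etcd.delete('foo')               # what state=absent does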
diff --git a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py b/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
deleted file mode 100644
index 341592be..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job.py
+++ /dev/null
@@ -1,255 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2020, FERREIRA Christophe
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: nomad_job
-author: FERREIRA Christophe (@chris93111)
-version_added: "1.3.0"
-short_description: Launch a Nomad Job
-description:
- - Launch a Nomad job.
- - Stop a Nomad job.
- - Force start a Nomad job.
-requirements:
- - python-nomad
-extends_documentation_fragment:
- - community.general.nomad
-options:
- name:
- description:
- - Name of the job to delete, stop, or start when no job source is provided.
- - Either this or I(content) must be specified.
- type: str
- state:
- description:
- - Deploy or remove job.
- choices: ["present", "absent"]
- required: true
- type: str
- force_start:
- description:
- - Force the job to start.
- type: bool
- default: false
- content:
- description:
- - Content of Nomad job.
- - Either this or I(name) must be specified.
- type: str
- content_format:
- description:
- - Type of content of Nomad job.
- choices: ["hcl", "json"]
- default: hcl
- type: str
-notes:
- - C(check_mode) is supported.
-seealso:
- - name: Nomad jobs documentation
- description: Complete documentation for Nomad API jobs.
- link: https://www.nomadproject.io/api-docs/jobs/
-'''
-
-EXAMPLES = '''
-- name: Create job
- community.general.nomad_job:
- host: localhost
- state: present
- content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
- timeout: 120
-
-- name: Stop job
- community.general.nomad_job:
- host: localhost
- state: absent
- name: api
-
-- name: Force job to start
- community.general.nomad_job:
- host: localhost
- state: present
- name: api
- timeout: 120
- force_start: true
-'''
-
-import json
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-import_nomad = None
-try:
- import nomad
- import_nomad = True
-except ImportError:
- import_nomad = False
-
-
-def run():
- module = AnsibleModule(
- argument_spec=dict(
- host=dict(required=True, type='str'),
- state=dict(required=True, choices=['present', 'absent']),
- use_ssl=dict(type='bool', default=True),
- timeout=dict(type='int', default=5),
- validate_certs=dict(type='bool', default=True),
- client_cert=dict(type='path', default=None),
- client_key=dict(type='path', default=None),
- namespace=dict(type='str', default=None),
- name=dict(type='str', default=None),
- content_format=dict(choices=['hcl', 'json'], default='hcl'),
- content=dict(type='str', default=None),
- force_start=dict(type='bool', default=False),
- token=dict(type='str', default=None, no_log=True)
- ),
- supports_check_mode=True,
- mutually_exclusive=[
- ["name", "content"]
- ],
- required_one_of=[
- ['name', 'content']
- ]
- )
-
- if not import_nomad:
- module.fail_json(msg=missing_required_lib("python-nomad"))
-
- certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
-
- nomad_client = nomad.Nomad(
- host=module.params.get('host'),
- secure=module.params.get('use_ssl'),
- timeout=module.params.get('timeout'),
- verify=module.params.get('validate_certs'),
- cert=certificate_ssl,
- namespace=module.params.get('namespace'),
- token=module.params.get('token')
- )
-
- if module.params.get('state') == "present":
-
- if module.params.get('name') and not module.params.get('force_start'):
- module.fail_json(msg='To start a job by name, force_start is required')
-
- changed = False
- if module.params.get('content'):
-
- if module.params.get('content_format') == 'json':
-
- job_json = module.params.get('content')
- try:
- job_json = json.loads(job_json)
- except ValueError as e:
- module.fail_json(msg=to_native(e))
- job = dict()
- job['job'] = job_json
- try:
- job_id = job_json.get('ID')
- if job_id is None:
- module.fail_json(msg="Cannot retrieve job with ID None")
- plan = nomad_client.job.plan_job(job_id, job, diff=True)
- if not plan['Diff'].get('Type') == "None":
- changed = True
- if not module.check_mode:
- result = nomad_client.jobs.register_job(job)
- else:
- result = plan
- else:
- result = plan
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- if module.params.get('content_format') == 'hcl':
-
- try:
- job_hcl = module.params.get('content')
- job_json = nomad_client.jobs.parse(job_hcl)
- job = dict()
- job['job'] = job_json
- except nomad.api.exceptions.BadRequestNomadException as err:
- msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
- module.fail_json(msg=to_native(msg))
- try:
- job_id = job_json.get('ID')
- plan = nomad_client.job.plan_job(job_id, job, diff=True)
- if not plan['Diff'].get('Type') == "None":
- changed = True
- if not module.check_mode:
- result = nomad_client.jobs.register_job(job)
- else:
- result = plan
- else:
- result = plan
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- if module.params.get('force_start'):
-
- try:
- job = dict()
- if module.params.get('name'):
- job_name = module.params.get('name')
- else:
- job_name = job_json['Name']
- job_json = nomad_client.job.get_job(job_name)
- if job_json['Status'] == 'running':
- result = job_json
- else:
- job_json['Status'] = 'running'
- job_json['Stop'] = False
- job['job'] = job_json
- if not module.check_mode:
- result = nomad_client.jobs.register_job(job)
- else:
- result = nomad_client.validate.validate_job(job)
- if not result.status_code == 200:
- module.fail_json(msg=to_native(result.text))
- result = json.loads(result.text)
- changed = True
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- if module.params.get('state') == "absent":
-
- try:
- if not module.params.get('name') is None:
- job_name = module.params.get('name')
- else:
- if module.params.get('content_format') == 'hcl':
- job_json = nomad_client.jobs.parse(module.params.get('content'))
- job_name = job_json['Name']
- if module.params.get('content_format') == 'json':
- job_json = json.loads(module.params.get('content'))  # content is a string; parse it before indexing
- job_name = job_json['Name']
- job = nomad_client.job.get_job(job_name)
- if job['Status'] == 'dead':
- changed = False
- result = job
- else:
- if not module.check_mode:
- result = nomad_client.job.deregister_job(job_name)
- else:
- result = job
- changed = True
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- module.exit_json(changed=changed, result=result)
-
-
-def main():
-
- run()
-
-
-if __name__ == "__main__":
- main()
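Stripped of the Ansible plumbing, the idempotency core of nomad_job is parse, plan, then register only when the plan shows a diff; a sketch with python-nomad, assuming an unauthenticated local agent and a job file named job.hcl:

import nomad

client = nomad.Nomad(host='localhost', secure=False)
with open('job.hcl') as f:
    job_spec = client.jobs.parse(f.read())       # HCL -> JSON job spec
plan = client.job.plan_job(job_spec['ID'], {'job': job_spec}, diff=True)
if plan['Diff'].get('Type') != 'None':           # Nomad reports "None" when nothing changes
    client.jobs.register_job({'job': job_spec})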
diff --git a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
deleted file mode 100644
index d49111bb..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/nomad/nomad_job_info.py
+++ /dev/null
@@ -1,344 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2020, FERREIRA Christophe
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: nomad_job_info
-author: FERREIRA Christophe (@chris93111)
-version_added: "1.3.0"
-short_description: Get Nomad Jobs info
-description:
- - Get info for one Nomad job.
- - List Nomad jobs.
-requirements:
- - python-nomad
-extends_documentation_fragment:
- - community.general.nomad
-options:
- name:
- description:
- - Name of the job to get info for.
- - If not specified, lists all jobs.
- type: str
-notes:
- - C(check_mode) is supported.
-seealso:
- - name: Nomad jobs documentation
- description: Complete documentation for Nomad API jobs.
- link: https://www.nomadproject.io/api-docs/jobs/
-'''
-
-EXAMPLES = '''
-- name: Get info for job awx
- community.general.nomad_job_info:
- host: localhost
- name: awx
- register: result
-
-- name: List Nomad jobs
- community.general.nomad_job_info:
- host: localhost
- register: result
-
-'''
-
-RETURN = '''
-result:
- description: List of dictionaries containing job info
- returned: success
- type: list
- sample: [
- {
- "Affinities": null,
- "AllAtOnce": false,
- "Constraints": null,
- "ConsulToken": "",
- "CreateIndex": 13,
- "Datacenters": [
- "dc1"
- ],
- "Dispatched": false,
- "ID": "example",
- "JobModifyIndex": 13,
- "Meta": null,
- "ModifyIndex": 13,
- "Multiregion": null,
- "Name": "example",
- "Namespace": "default",
- "NomadTokenID": "",
- "ParameterizedJob": null,
- "ParentID": "",
- "Payload": null,
- "Periodic": null,
- "Priority": 50,
- "Region": "global",
- "Spreads": null,
- "Stable": false,
- "Status": "pending",
- "StatusDescription": "",
- "Stop": false,
- "SubmitTime": 1602244370615307000,
- "TaskGroups": [
- {
- "Affinities": null,
- "Constraints": null,
- "Count": 1,
- "EphemeralDisk": {
- "Migrate": false,
- "SizeMB": 300,
- "Sticky": false
- },
- "Meta": null,
- "Migrate": {
- "HealthCheck": "checks",
- "HealthyDeadline": 300000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000
- },
- "Name": "cache",
- "Networks": null,
- "ReschedulePolicy": {
- "Attempts": 0,
- "Delay": 30000000000,
- "DelayFunction": "exponential",
- "Interval": 0,
- "MaxDelay": 3600000000000,
- "Unlimited": true
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Scaling": null,
- "Services": null,
- "ShutdownDelay": null,
- "Spreads": null,
- "StopAfterClientDisconnect": null,
- "Tasks": [
- {
- "Affinities": null,
- "Artifacts": null,
- "CSIPluginConfig": null,
- "Config": {
- "image": "redis:3.2",
- "port_map": [
- {
- "db": 6379.0
- }
- ]
- },
- "Constraints": null,
- "DispatchPayload": null,
- "Driver": "docker",
- "Env": null,
- "KillSignal": "",
- "KillTimeout": 5000000000,
- "Kind": "",
- "Leader": false,
- "Lifecycle": null,
- "LogConfig": {
- "MaxFileSizeMB": 10,
- "MaxFiles": 10
- },
- "Meta": null,
- "Name": "redis",
- "Resources": {
- "CPU": 500,
- "Devices": null,
- "DiskMB": 0,
- "IOPS": 0,
- "MemoryMB": 256,
- "Networks": [
- {
- "CIDR": "",
- "DNS": null,
- "Device": "",
- "DynamicPorts": [
- {
- "HostNetwork": "default",
- "Label": "db",
- "To": 0,
- "Value": 0
- }
- ],
- "IP": "",
- "MBits": 10,
- "Mode": "",
- "ReservedPorts": null
- }
- ]
- },
- "RestartPolicy": {
- "Attempts": 3,
- "Delay": 15000000000,
- "Interval": 1800000000000,
- "Mode": "fail"
- },
- "Services": [
- {
- "AddressMode": "auto",
- "CanaryMeta": null,
- "CanaryTags": null,
- "Checks": [
- {
- "AddressMode": "",
- "Args": null,
- "CheckRestart": null,
- "Command": "",
- "Expose": false,
- "FailuresBeforeCritical": 0,
- "GRPCService": "",
- "GRPCUseTLS": false,
- "Header": null,
- "InitialStatus": "",
- "Interval": 10000000000,
- "Method": "",
- "Name": "alive",
- "Path": "",
- "PortLabel": "",
- "Protocol": "",
- "SuccessBeforePassing": 0,
- "TLSSkipVerify": false,
- "TaskName": "",
- "Timeout": 2000000000,
- "Type": "tcp"
- }
- ],
- "Connect": null,
- "EnableTagOverride": false,
- "Meta": null,
- "Name": "redis-cache",
- "PortLabel": "db",
- "Tags": [
- "global",
- "cache"
- ],
- "TaskName": ""
- }
- ],
- "ShutdownDelay": 0,
- "Templates": null,
- "User": "",
- "Vault": null,
- "VolumeMounts": null
- }
- ],
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "checks",
- "HealthyDeadline": 180000000000,
- "MaxParallel": 1,
- "MinHealthyTime": 10000000000,
- "ProgressDeadline": 600000000000,
- "Stagger": 30000000000
- },
- "Volumes": null
- }
- ],
- "Type": "service",
- "Update": {
- "AutoPromote": false,
- "AutoRevert": false,
- "Canary": 0,
- "HealthCheck": "",
- "HealthyDeadline": 0,
- "MaxParallel": 1,
- "MinHealthyTime": 0,
- "ProgressDeadline": 0,
- "Stagger": 30000000000
- },
- "VaultNamespace": "",
- "VaultToken": "",
- "Version": 0
- }
- ]
-
-'''
-
-
-import os
-import json
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_native
-
-import_nomad = None
-try:
- import nomad
- import_nomad = True
-except ImportError:
- import_nomad = False
-
-
-def run():
- module = AnsibleModule(
- argument_spec=dict(
- host=dict(required=True, type='str'),
- use_ssl=dict(type='bool', default=True),
- timeout=dict(type='int', default=5),
- validate_certs=dict(type='bool', default=True),
- client_cert=dict(type='path', default=None),
- client_key=dict(type='path', default=None),
- namespace=dict(type='str', default=None),
- name=dict(type='str', default=None),
- token=dict(type='str', default=None, no_log=True)
- ),
- supports_check_mode=True
- )
-
- if not import_nomad:
- module.fail_json(msg=missing_required_lib("python-nomad"))
-
- certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
-
- nomad_client = nomad.Nomad(
- host=module.params.get('host'),
- secure=module.params.get('use_ssl'),
- timeout=module.params.get('timeout'),
- verify=module.params.get('validate_certs'),
- cert=certificate_ssl,
- namespace=module.params.get('namespace'),
- token=module.params.get('token')
- )
-
- changed = False
- result = list()
- try:
- job_list = nomad_client.jobs.get_jobs()
- for job in job_list:
- result.append(nomad_client.job.get_job(job.get('ID')))
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- if module.params.get('name'):
- filtered = list()  # renamed to avoid shadowing the filter() builtin
- try:
- for job in result:
- if job.get('ID') == module.params.get('name'):
- filtered.append(job)
- result = filtered
- if not filtered:
- module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
- except Exception as e:
- module.fail_json(msg=to_native(e))
-
- module.exit_json(changed=changed, result=result)
-
-
-def main():
-
- run()
-
-
-if __name__ == "__main__":
- main()
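The info module reduces to two python-nomad calls; a sketch, assuming a local agent and a hypothetical job named api:

import nomad

client = nomad.Nomad(host='localhost')
jobs = [client.job.get_job(j['ID']) for j in client.jobs.get_jobs()]
api_jobs = [j for j in jobs if j['ID'] == 'api']  # optional name filter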
diff --git a/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
deleted file mode 100644
index 4ec6010f..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/pacemaker_cluster.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Mathieu Bultel
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: pacemaker_cluster
-short_description: Manage pacemaker clusters
-author:
-- Mathieu Bultel (@matbu)
-description:
- - This module can manage a pacemaker cluster and nodes from Ansible using
- the pacemaker cli.
-options:
- state:
- description:
- - Indicate desired state of the cluster
- choices: [ cleanup, offline, online, restart ]
- type: str
- node:
- description:
- - Specify which node of the cluster you want to manage. If unset, the
- cluster status itself is managed; C(all) checks the status of all nodes.
- type: str
- timeout:
- description:
- - Timeout in seconds after which the module considers the action to have failed
- default: 300
- type: int
- force:
- description:
- - Force the change of the cluster state
- type: bool
- default: 'yes'
-'''
-EXAMPLES = '''
----
-- name: Set cluster Online
- hosts: localhost
- gather_facts: no
- tasks:
- - name: Get cluster state
- community.general.pacemaker_cluster:
- state: online
-'''
-
-RETURN = '''
-changed:
- description: True if the cluster state has changed
- type: bool
- returned: always
-out:
- description: The output of the current state of the cluster. It returns a
- list of the node states.
- type: str
- sample: '[[" overcloud-controller-0", " Online"]]'
- returned: always
-rc:
- description: Exit code of the module
- type: int
- returned: always
-'''
-
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
-
-
-def get_cluster_status(module):
- cmd = "pcs cluster status"
- rc, out, err = module.run_command(cmd)
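- # pcs prints the _PCS_CLUSTER_DOWN message when the cluster stack is stopped,
- # so look for that message as a substring of the command output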
- if _PCS_CLUSTER_DOWN in out:
- return 'offline'
- else:
- return 'online'
-
-
-def get_node_status(module, node='all'):
- if node == 'all':
- cmd = "pcs cluster pcsd-status"
- else:
- cmd = "pcs cluster pcsd-status %s" % node
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
- status = []
- for o in out.splitlines():
- status.append(o.split(':'))
- return status
-
-
-def clean_cluster(module, timeout):
- cmd = "pcs resource cleanup"
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-
-
-def set_cluster(module, state, timeout, force):
- if state == 'online':
- cmd = "pcs cluster start"
- if state == 'offline':
- cmd = "pcs cluster stop"
- if force:
- cmd = "%s --force" % cmd
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-
- t = time.time()
- ready = False
- while time.time() < t + timeout:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- ready = True
- break
- if not ready:
- module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
-
-
-def set_node(module, state, timeout, force, node='all'):
- # map states
- if state == 'online':
- cmd = "pcs cluster start"
- if state == 'offline':
- cmd = "pcs cluster stop"
- if force:
- cmd = "%s --force" % cmd
-
- nodes_state = get_node_status(module, node)
- for node in nodes_state:
- if node[1].strip().lower() != state:
- cmd = "%s %s" % (cmd, node[0].strip())
- rc, out, err = module.run_command(cmd)
- if rc == 1:
- module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
-
- t = time.time()
- ready = False
- while time.time() < t + timeout:
- nodes_state = get_node_status(module)
- for node in nodes_state:
- if node[1].strip().lower() == state:
- ready = True
- break
- if not ready:
- module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
-
-
-def main():
- argument_spec = dict(
- state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
- node=dict(type='str'),
- timeout=dict(type='int', default=300),
- force=dict(type='bool', default=True),
- )
-
- module = AnsibleModule(
- argument_spec,
- supports_check_mode=True,
- )
- changed = False
- state = module.params['state']
- node = module.params['node']
- force = module.params['force']
- timeout = module.params['timeout']
-
- if state in ['online', 'offline']:
- # Get cluster status
- if node is None:
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- set_cluster(module, state, timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == state:
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Fail to bring the cluster %s" % state)
- else:
- cluster_state = get_node_status(module, node)
- # Check cluster state
- for node_state in cluster_state:
- if node_state[1].strip().lower() == state:
- module.exit_json(changed=changed, out=cluster_state)
- else:
- # Set cluster status if needed
- set_cluster(module, state, timeout, force)
- cluster_state = get_node_status(module, node)
- module.exit_json(changed=True, out=cluster_state)
-
- if state in ['restart']:
- set_cluster(module, 'offline', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'offline':
- set_cluster(module, 'online', timeout, force)
- cluster_state = get_cluster_status(module)
- if cluster_state == 'online':
- module.exit_json(changed=True, out=cluster_state)
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
- else:
- module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
-
- if state in ['cleanup']:
- clean_cluster(module, timeout)
- cluster_state = get_cluster_status(module)
- module.exit_json(changed=True,
- out=cluster_state)
-
-
-if __name__ == '__main__':
- main()
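Outside Ansible, set_cluster is just a start-and-poll loop over pcs; a sketch of the same pattern, assuming pcs is on PATH and sufficient privileges:

import subprocess
import time

def wait_cluster_online(timeout=300):
    subprocess.run(['pcs', 'cluster', 'start'], check=True)
    deadline = time.time() + timeout
    while time.time() < deadline:
        res = subprocess.run(['pcs', 'cluster', 'status'],
                             capture_output=True, text=True)
        if 'Error: cluster is not currently running on this node' not in res.stdout + res.stderr:
            return                    # cluster reports as running
        time.sleep(5)
    raise TimeoutError('cluster did not come online within %ss' % timeout)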
diff --git a/ansible_collections/community/general/plugins/modules/clustering/znode.py b/ansible_collections/community/general/plugins/modules/clustering/znode.py
deleted file mode 100644
index d55a502b..00000000
--- a/ansible_collections/community/general/plugins/modules/clustering/znode.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright 2015 WP Engine, Inc. All rights reserved.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: znode
-short_description: Create, delete, retrieve, and update znodes using ZooKeeper
-description:
- - Create, delete, retrieve, and update znodes using ZooKeeper.
-options:
- hosts:
- description:
- - A list of ZooKeeper servers (format '[server]:[port]').
- required: true
- type: str
- name:
- description:
- - The path of the znode.
- required: true
- type: str
- value:
- description:
- - The value assigned to the znode.
- type: str
- op:
- description:
- - An operation to perform. Mutually exclusive with state.
- choices: [ get, wait, list ]
- type: str
- state:
- description:
- - The state to enforce. Mutually exclusive with op.
- choices: [ present, absent ]
- type: str
- timeout:
- description:
- - The amount of time to wait for a node to appear.
- default: 300
- type: int
- recursive:
- description:
- - Recursively delete node and all its children.
- type: bool
- default: 'no'
-requirements:
- - kazoo >= 2.1
- - python >= 2.6
-author: "Trey Perry (@treyperry)"
-'''
-
-EXAMPLES = """
-- name: Creating or updating a znode with a given value
- community.general.znode:
- hosts: 'localhost:2181'
- name: /mypath
- value: myvalue
- state: present
-
-- name: Getting the value and stat structure for a znode
- community.general.znode:
- hosts: 'localhost:2181'
- name: /mypath
- op: get
-
-- name: Listing a particular znode's children
- community.general.znode:
- hosts: 'localhost:2181'
- name: /zookeeper
- op: list
-
-- name: Waiting 20 seconds for a znode to appear at path /mypath
- community.general.znode:
- hosts: 'localhost:2181'
- name: /mypath
- op: wait
- timeout: 20
-
-- name: Deleting a znode at path /mypath
- community.general.znode:
- hosts: 'localhost:2181'
- name: /mypath
- state: absent
-
-- name: Creating or updating a znode with a given value on a remote Zookeeper
- community.general.znode:
- hosts: 'my-zookeeper-node:2181'
- name: /mypath
- value: myvalue
- state: present
- delegate_to: 127.0.0.1
-"""
-
-import time
-import traceback
-
-KAZOO_IMP_ERR = None
-try:
- from kazoo.client import KazooClient
- from kazoo.handlers.threading import KazooTimeoutError
- KAZOO_INSTALLED = True
-except ImportError:
- KAZOO_IMP_ERR = traceback.format_exc()
- KAZOO_INSTALLED = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.common.text.converters import to_bytes
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- hosts=dict(required=True, type='str'),
- name=dict(required=True, type='str'),
- value=dict(type='str'),
- op=dict(choices=['get', 'wait', 'list']),
- state=dict(choices=['present', 'absent']),
- timeout=dict(default=300, type='int'),
- recursive=dict(default=False, type='bool')
- ),
- supports_check_mode=False
- )
-
- if not KAZOO_INSTALLED:
- module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
-
- check = check_params(module.params)
- if not check['success']:
- module.fail_json(msg=check['msg'])
-
- zoo = KazooCommandProxy(module)
- try:
- zoo.start()
- except KazooTimeoutError:
- module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
-
- command_dict = {
- 'op': {
- 'get': zoo.get,
- 'list': zoo.list,
- 'wait': zoo.wait
- },
- 'state': {
- 'present': zoo.present,
- 'absent': zoo.absent
- }
- }
-
- command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
- method = module.params[command_type]
- result, result_dict = command_dict[command_type][method]()
- zoo.shutdown()
-
- if result:
- module.exit_json(**result_dict)
- else:
- module.fail_json(**result_dict)
-
-
-def check_params(params):
- if not params['state'] and not params['op']:
- return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
-
- if params['state'] and params['op']:
- return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
-
- return {'success': True}
-
-
-class KazooCommandProxy():
- def __init__(self, module):
- self.module = module
- self.zk = KazooClient(module.params['hosts'])
-
- def absent(self):
- return self._absent(self.module.params['name'])
-
- def exists(self, znode):
- return self.zk.exists(znode)
-
- def list(self):
- children = self.zk.get_children(self.module.params['name'])
- return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
- 'znode': self.module.params['name']}
-
- def present(self):
- return self._present(self.module.params['name'], self.module.params['value'])
-
- def get(self):
- return self._get(self.module.params['name'])
-
- def shutdown(self):
- self.zk.stop()
- self.zk.close()
-
- def start(self):
- self.zk.start()
-
- def wait(self):
- return self._wait(self.module.params['name'], self.module.params['timeout'])
-
- def _absent(self, znode):
- if self.exists(znode):
- self.zk.delete(znode, recursive=self.module.params['recursive'])
- return True, {'changed': True, 'msg': 'The znode was deleted.'}
- else:
- return True, {'changed': False, 'msg': 'The znode does not exist.'}
-
- def _get(self, path):
- if self.exists(path):
- value, zstat = self.zk.get(path)
- stat_dict = {}
- for i in dir(zstat):
- if not i.startswith('_'):
- attr = getattr(zstat, i)
- if isinstance(attr, (int, str)):
- stat_dict[i] = attr
- result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
- 'stat': stat_dict}
- else:
- result = False, {'msg': 'The requested node does not exist.'}
-
- return result
-
- def _present(self, path, value):
- if self.exists(path):
- (current_value, zstat) = self.zk.get(path)
- if to_bytes(value) != current_value:  # zk.get returns bytes
- self.zk.set(path, to_bytes(value))
- return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
- 'value': value}
- else:
- return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
- else:
- self.zk.create(path, to_bytes(value), makepath=True)
- return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
-
- def _wait(self, path, timeout, interval=5):
- lim = time.time() + timeout
-
- while time.time() < lim:
- if self.exists(path):
- return True, {'msg': 'The node appeared before the configured timeout.',
- 'znode': path, 'timeout': timeout}
- else:
- time.sleep(interval)
-
- return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
- 'znode': path}
-
-
-if __name__ == '__main__':
- main()
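The KazooCommandProxy above wraps a handful of kazoo calls; a direct sketch of the state=present logic, assuming a local ZooKeeper on the default port:

from kazoo.client import KazooClient

zk = KazooClient(hosts='localhost:2181')
zk.start()
if zk.exists('/mypath'):
    value, zstat = zk.get('/mypath')
    if value != b'myvalue':
        zk.set('/mypath', b'myvalue')            # update in place
else:
    zk.create('/mypath', b'myvalue', makepath=True)
zk.stop()
zk.close()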
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/cobbler_sync.py
deleted file mode 120000
index 9c1b6ace..00000000
--- a/ansible_collections/community/general/plugins/modules/cobbler_sync.py
+++ /dev/null
@@ -1 +0,0 @@
-remote_management/cobbler/cobbler_sync.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_system.py b/ansible_collections/community/general/plugins/modules/cobbler_system.py
deleted file mode 120000
index d4731356..00000000
--- a/ansible_collections/community/general/plugins/modules/cobbler_system.py
+++ /dev/null
@@ -1 +0,0 @@
-remote_management/cobbler/cobbler_system.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/composer.py b/ansible_collections/community/general/plugins/modules/composer.py
deleted file mode 120000
index 33d194fd..00000000
--- a/ansible_collections/community/general/plugins/modules/composer.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/composer.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/consul.py b/ansible_collections/community/general/plugins/modules/consul.py
deleted file mode 120000
index 7f72e9cd..00000000
--- a/ansible_collections/community/general/plugins/modules/consul.py
+++ /dev/null
@@ -1 +0,0 @@
-clustering/consul/consul.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/consul_acl.py b/ansible_collections/community/general/plugins/modules/consul_acl.py
deleted file mode 120000
index d6afb151..00000000
--- a/ansible_collections/community/general/plugins/modules/consul_acl.py
+++ /dev/null
@@ -1 +0,0 @@
-clustering/consul/consul_acl.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/consul_kv.py b/ansible_collections/community/general/plugins/modules/consul_kv.py
deleted file mode 120000
index 4f96db68..00000000
--- a/ansible_collections/community/general/plugins/modules/consul_kv.py
+++ /dev/null
@@ -1 +0,0 @@
-clustering/consul/consul_kv.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py
deleted file mode 120000
index e167757a..00000000
--- a/ansible_collections/community/general/plugins/modules/consul_session.py
+++ /dev/null
@@ -1 +0,0 @@
-clustering/consul/consul_session.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/copr.py b/ansible_collections/community/general/plugins/modules/copr.py
deleted file mode 120000
index 6d4f8e2f..00000000
--- a/ansible_collections/community/general/plugins/modules/copr.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/os/copr.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py
deleted file mode 120000
index e3dae867..00000000
--- a/ansible_collections/community/general/plugins/modules/cpanm.py
+++ /dev/null
@@ -1 +0,0 @@
-packaging/language/cpanm.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/cronvar.py b/ansible_collections/community/general/plugins/modules/cronvar.py
deleted file mode 120000
index 148f3ccd..00000000
--- a/ansible_collections/community/general/plugins/modules/cronvar.py
+++ /dev/null
@@ -1 +0,0 @@
-system/cronvar.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/crypttab.py b/ansible_collections/community/general/plugins/modules/crypttab.py
deleted file mode 120000
index 189173f0..00000000
--- a/ansible_collections/community/general/plugins/modules/crypttab.py
+++ /dev/null
@@ -1 +0,0 @@
-system/crypttab.py
\ No newline at end of file
diff --git a/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
deleted file mode 100644
index 27b979ad..00000000
--- a/ansible_collections/community/general/plugins/modules/database/aerospike/aerospike_migrations.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""short_description: Check or wait for migrations between nodes"""
-
-# Copyright: (c) 2018, Albert Autin
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
----
-module: aerospike_migrations
-short_description: Check or wait for migrations between nodes
-description:
- - This can be used to check for migrations in a cluster.
- This makes it easy to do a rolling upgrade/update on Aerospike nodes.
- - If waiting for migrations is not desired, simply poll until
- port 3000 is available or C(asinfo -v status) returns ok.
-author: "Albert Autin (@Alb0t)"
-options:
- host:
- description:
- - Which host do we use as seed for info connection
- required: False
- type: str
- default: localhost
- port:
- description:
- - Which port to connect to Aerospike on (service port)
- required: False
- type: int
- default: 3000
- connect_timeout:
- description:
- - How long to try to connect before giving up (milliseconds)
- required: False
- type: int
- default: 1000
- consecutive_good_checks:
- description:
- - How many times should the cluster report "no migrations"
- consecutively before returning OK back to ansible?
- required: False
- type: int
- default: 3
- sleep_between_checks:
- description:
- - How long to sleep between each check (seconds).
- required: False
- type: int
- default: 60
- tries_limit:
- description:
- - How many times do we poll before giving up and failing?
- default: 300
- required: False
- type: int
- local_only:
- description:
- - Do you wish to only check for migrations on the local node
- before returning, or do you want all nodes in the cluster
- to finish before returning?
- required: True
- type: bool
- min_cluster_size:
- description:
- - The check will keep failing until the cluster size is met
- or the tries are exhausted.
- required: False
- type: int
- default: 1
- fail_on_cluster_change:
- description:
- - Fail if the cluster key changes;
- if something else is changing the cluster, we may want to fail.
- required: False
- type: bool
- default: True
- migrate_tx_key:
- description:
- - The metric key used to determine if we have tx migrations
- remaining. Changeable due to backwards compatibility.
- required: False
- type: str
- default: migrate_tx_partitions_remaining
- migrate_rx_key:
- description:
- - The metric key used to determine if we have rx migrations
- remaining. Changeable due to backwards compatibility.
- required: False
- type: str
- default: migrate_rx_partitions_remaining
- target_cluster_size:
- description:
- - When all aerospike builds in the cluster are greater than
- version 4.3, then the C(cluster-stable) info command will be used.
- Inside this command, you can optionally specify what the target
- cluster size is - but it is not necessary. You can still rely on
- min_cluster_size if you don't want to use this option.
- - If this option is specified on a cluster that has at least 1
- host <4.3 then it will be ignored until the min version reaches
- 4.3.
- required: False
- type: int
-'''
-EXAMPLES = '''
-# check for migrations on local node
-- name: Wait for migrations on local node before proceeding
- community.general.aerospike_migrations:
- host: "localhost"
- connect_timeout: 2000
- consecutive_good_checks: 5
- sleep_between_checks: 15
- tries_limit: 600
- local_only: False
-
-# example playbook:
-- name: Upgrade aerospike
- hosts: all
- become: true
- serial: 1
- tasks:
- - name: Install dependencies
- ansible.builtin.apt:
- name:
- - python
- - python-pip
- - python-setuptools
- state: latest
- - name: Setup aerospike
- ansible.builtin.pip:
- name: aerospike
-# check for migrations every (sleep_between_checks)
-# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
-# Will exit if any exception, which can be caused by bad nodes,
-# nodes not returning data, or other reasons.
-# Maximum runtime before giving up in this case will be:
-# Tries Limit * Sleep Between Checks * delay * retries
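-# e.g. with the values below: 5 * 1s * 60 * 120 = 36000s (10 hours) worst case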
- - name: Wait for aerospike migrations
- community.general.aerospike_migrations:
- local_only: True
- sleep_between_checks: 1
- tries_limit: 5
- consecutive_good_checks: 3
- fail_on_cluster_change: true
- min_cluster_size: 3
- target_cluster_size: 4
- register: migrations_check
- until: migrations_check is succeeded
- changed_when: false
- delay: 60
- retries: 120
- - name: Another thing
- ansible.builtin.shell: |
- echo foo
- - name: Reboot
- ansible.builtin.reboot:
-'''
-
-RETURN = '''
-# Returns only a success/failure result. Changed is always false.
-'''
-
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-LIB_FOUND_ERR = None
-try:
- import aerospike
- from time import sleep
- import re
-except ImportError as ie:
- LIB_FOUND = False
- LIB_FOUND_ERR = traceback.format_exc()
-else:
- LIB_FOUND = True
-
-
-def run_module():
- """run ansible module"""
- module_args = dict(
- host=dict(type='str', required=False, default='localhost'),
- port=dict(type='int', required=False, default=3000),
- connect_timeout=dict(type='int', required=False, default=1000),
- consecutive_good_checks=dict(type='int', required=False, default=3),
- sleep_between_checks=dict(type='int', required=False, default=60),
- tries_limit=dict(type='int', required=False, default=300),
- local_only=dict(type='bool', required=True),
- min_cluster_size=dict(type='int', required=False, default=1),
- target_cluster_size=dict(type='int', required=False, default=None),
- fail_on_cluster_change=dict(type='bool', required=False, default=True),
- migrate_tx_key=dict(type='str', required=False, no_log=False,
- default="migrate_tx_partitions_remaining"),
- migrate_rx_key=dict(type='str', required=False, no_log=False,
- default="migrate_rx_partitions_remaining")
- )
-
- result = dict(
- changed=False,
- )
-
- module = AnsibleModule(
- argument_spec=module_args,
- supports_check_mode=True
- )
- if not LIB_FOUND:
- module.fail_json(msg=missing_required_lib('aerospike'),
- exception=LIB_FOUND_ERR)
-
- try:
- if module.check_mode:
- has_migrations, skip_reason = False, None
- else:
- migrations = Migrations(module)
- has_migrations, skip_reason = migrations.has_migs(
- module.params['local_only']
- )
-
- if has_migrations:
- module.fail_json(msg="Failed.", skip_reason=skip_reason)
- except Exception as e:
- module.fail_json(msg="Error: {0}".format(e))
-
- module.exit_json(**result)
-
-
-class Migrations:
- """ Check or wait for migrations between nodes """
-
- def __init__(self, module):
- self.module = module
- self._client = self._create_client().connect()
- self._nodes = {}
- self._update_nodes_list()
- self._cluster_statistics = {}
- self._update_cluster_statistics()
- self._namespaces = set()
- self._update_cluster_namespace_list()
- self._build_list = set()
- self._update_build_list()
- self._start_cluster_key = \
- self._cluster_statistics[self._nodes[0]]['cluster_key']
-
- def _create_client(self):
- """ TODO: add support for auth, tls, and other special features
- I won't use those features, so I'll wait until somebody complains
- or does it for me (Cross fingers)
- create the client object"""
- config = {
- 'hosts': [
- (self.module.params['host'], self.module.params['port'])
- ],
- 'policies': {
- 'timeout': self.module.params['connect_timeout']
- }
- }
- return aerospike.client(config)
-
- def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
- """delimiter is for separate stats that come back, NOT for kv
- separation which is ="""
- if node is None: # If no node passed, use the first one (local)
- node = self._nodes[0]
- data = self._client.info_node(cmd, node)
- data = data.split("\t")
- if len(data) != 1 and len(data) != 2:
- self.module.fail_json(
- msg="Unexpected number of values returned in info command: " +
- str(len(data))
- )
- # data will be in format 'command\toutput'
- data = data[-1]
- data = data.rstrip("\n\r")
- data_arr = data.split(delimiter)
-
- # some commands don't return in kv format
- # so we don't want a dict from those.
- if '=' in data:
- retval = dict(
- metric.split("=", 1) for metric in data_arr
- )
- else:
- # if only 1 element found, and not kv, return just the value.
- if len(data_arr) == 1:
- retval = data_arr[0]
- else:
- retval = data_arr
- return retval
-
- def _update_build_list(self):
- """creates self._build_list which is a unique list
- of build versions."""
- self._build_list = set()
- for node in self._nodes:
- build = self._info_cmd_helper('build', node)
- self._build_list.add(build)
-
- # just checks to see if the version is 4.3 or greater
- def _can_use_cluster_stable(self):
- # if version <4.3 we can't use cluster-stable info cmd
- # regex hack to check for versions beginning with 0-3 or
- # beginning with 4.0,4.1,4.2
- if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
- return False
- return True
-
- def _update_cluster_namespace_list(self):
- """ make a unique list of namespaces
- TODO: does this work on a rolling namespace add/deletion?
- thankfully if it doesn't, we don't need this on builds >=4.3"""
- self._namespaces = set()
- for node in self._nodes:
- namespaces = self._info_cmd_helper('namespaces', node)
- for namespace in namespaces:
- self._namespaces.add(namespace)
-
- def _update_cluster_statistics(self):
- """create a dict of nodes with their related stats """
- self._cluster_statistics = {}
- for node in self._nodes:
- self._cluster_statistics[node] = \
- self._info_cmd_helper('statistics', node)
-
- def _update_nodes_list(self):
- """get a fresh list of all the nodes"""
- self._nodes = self._client.get_nodes()
- if not self._nodes:
- self.module.fail_json("Failed to retrieve at least 1 node.")
-
- def _namespace_has_migs(self, namespace, node=None):
- """returns a True or False.
- Does the namespace have migrations for the node passed?
- If no node passed, uses the local node or the first one in the list"""
- namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
- try:
- namespace_tx = \
- int(namespace_stats[self.module.params['migrate_tx_key']])
- namespace_rx = \
- int(namespace_stats[self.module.params['migrate_rx_key']])
- except KeyError:
- self.module.fail_json(
- msg="Did not find partition remaining key:" +
- self.module.params['migrate_tx_key'] +
- " or key:" +
- self.module.params['migrate_rx_key'] +
- " in 'namespace/" +
- namespace +
- "' output."
- )
- except TypeError:
- self.module.fail_json(
- msg="namespace stat returned was not numerical"
- )
- return namespace_tx != 0 or namespace_rx != 0
-
- def _node_has_migs(self, node=None):
- """just calls namespace_has_migs and
- if any namespace has migs returns true"""
- migs = 0
- self._update_cluster_namespace_list()
- for namespace in self._namespaces:
- if self._namespace_has_migs(namespace, node):
- migs += 1
- return migs != 0
-
- def _cluster_key_consistent(self):
- """create a dictionary to store what each node
- returns the cluster key as. we should end up with only 1 dict key,
- with the key being the cluster key."""
- cluster_keys = {}
- for node in self._nodes:
- cluster_key = self._cluster_statistics[node][
- 'cluster_key']
- if cluster_key not in cluster_keys:
- cluster_keys[cluster_key] = 1
- else:
- cluster_keys[cluster_key] += 1
- if len(cluster_keys.keys()) == 1 and \
- self._start_cluster_key in cluster_keys:
- return True
- return False
-
- def _cluster_migrates_allowed(self):
- """ensure all nodes have 'migrate_allowed' in their stats output"""
- for node in self._nodes:
- node_stats = self._info_cmd_helper('statistics', node)
- allowed = node_stats['migrate_allowed']
- if allowed == "false":
- return False
- return True
-
- def _cluster_has_migs(self):
- """calls node_has_migs for each node"""
- migs = 0
- for node in self._nodes:
- if self._node_has_migs(node):
- migs += 1
- if migs == 0:
- return False
- return True
-
- def _has_migs(self, local):
- if local:
- return self._local_node_has_migs()
- return self._cluster_has_migs()
-
- def _local_node_has_migs(self):
- return self._node_has_migs(None)
-
- def _is_min_cluster_size(self):
- """checks that all nodes in the cluster are returning the
- minimum cluster size specified in their statistics output"""
- sizes = set()
- for node in self._cluster_statistics:
- sizes.add(int(self._cluster_statistics[node]['cluster_size']))
-
- if len(sizes) > 1: # if we are getting more than 1 size, let's say no
- return False
- if min(sizes) >= self.module.params['min_cluster_size']:
- return True
- return False
-
- def _cluster_stable(self):
- """Added 4.3:
- cluster-stable:size=;ignore-migrations=;namespace=
- Returns the current 'cluster_key' when the following are satisfied:
-
- If 'size' is specified then the target node's 'cluster-size'
- must match size.
- If 'ignore-migrations' is either unspecified or 'false' then
- the target node's migrations counts must be zero for the provided
- 'namespace' or all namespaces if 'namespace' is not provided."""
- cluster_key = set()
- cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
- cmd = "cluster-stable:"
- target_cluster_size = self.module.params['target_cluster_size']
- if target_cluster_size is not None:
- cmd = cmd + "size=" + str(target_cluster_size) + ";"
- for node in self._nodes:
- try:
- cluster_key.add(self._info_cmd_helper(cmd, node))
- except aerospike.exception.ServerError as e: # unstable-cluster is returned in form of Exception
- if 'unstable-cluster' in e.msg:
- return False
- raise e
- if len(cluster_key) == 1:
- return True
- return False
-
- def _cluster_good_state(self):
- """checks a few things to make sure we're OK to say the cluster
- has no migs. It could be in an unhealthy condition that does not allow
- migs, or a split brain."""
- if self._cluster_key_consistent() is not True:
- return False, "Cluster key inconsistent."
- if self._is_min_cluster_size() is not True:
- return False, "Cluster min size not reached."
- if self._cluster_migrates_allowed() is not True:
- return False, "migrate_allowed is false somewhere."
- return True, "OK."
-
- def has_migs(self, local=True):
- """returns a boolean, False if no migrations otherwise True"""
- consecutive_good = 0
- try_num = 0
- skip_reason = list()
- while \
- try_num < int(self.module.params['tries_limit']) and \
- consecutive_good < \
- int(self.module.params['consecutive_good_checks']):
-
- self._update_nodes_list()
- self._update_cluster_statistics()
-
- # These checks are outside of the while loop because
- # we probably want to skip & sleep instead of failing entirely
- stable, reason = self._cluster_good_state()
- if stable is not True:
- skip_reason.append(
- "Skipping on try#" + str(try_num) +
- " for reason:" + reason
- )
- else:
- if self._can_use_cluster_stable():
- if self._cluster_stable():
- consecutive_good += 1
- else:
- consecutive_good = 0
- skip_reason.append(
- "Skipping on try#" + str(try_num) +
- " for reason:" + " cluster_stable"
- )
- elif self._has_migs(local):
- # print("_has_migs")
- skip_reason.append(
- "Skipping on try#" + str(try_num) +
- " for reason:" + " migrations"
- )
- consecutive_good = 0
- else:
- consecutive_good += 1
- if consecutive_good == self.module.params[
- 'consecutive_good_checks']:
- break
- try_num += 1
- sleep(self.module.params['sleep_between_checks'])
- # print(skip_reason)
- if consecutive_good == self.module.params['consecutive_good_checks']:
- return False, None
- return True, skip_reason
-
-
-def main():
- """main method for ansible module"""
- run_module()
-
-
-if __name__ == '__main__':
- main()
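Everything the Migrations class checks comes from aerospike info calls; a sketch of the raw sequence, assuming a single local node and no auth:

import aerospike

client = aerospike.client({'hosts': [('localhost', 3000)]}).connect()
node = client.get_nodes()[0]                     # (address, port) tuple
raw = client.info_node('statistics', node)       # "statistics\tk1=v1;k2=v2;..."
stats = dict(kv.split('=', 1) for kv in raw.split('\t')[-1].strip().split(';'))
print(stats['cluster_size'], stats['migrate_allowed'])
client.close()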
diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
deleted file mode 100644
index 6601b301..00000000
--- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_database.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Kamil Szczygiel
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: influxdb_database
-short_description: Manage InfluxDB databases
-description:
- - Manage InfluxDB databases.
-author: "Kamil Szczygiel (@kamsz)"
-requirements:
- - "python >= 2.6"
- - "influxdb >= 0.9"
- - requests
-options:
- database_name:
- description:
- - Name of the database.
- required: true
- type: str
- state:
- description:
- - Determines if the database should be created or destroyed.
- choices: [ absent, present ]
- default: present
- type: str
-extends_documentation_fragment:
-- community.general.influxdb
-
-'''
-
-EXAMPLES = r'''
-# Example influxdb_database command from Ansible Playbooks
-- name: Create database
- community.general.influxdb_database:
- hostname: "{{influxdb_ip_address}}"
- database_name: "{{influxdb_database_name}}"
-
-- name: Destroy database
- community.general.influxdb_database:
- hostname: "{{influxdb_ip_address}}"
- database_name: "{{influxdb_database_name}}"
- state: absent
-
-- name: Create database using custom credentials
- community.general.influxdb_database:
- hostname: "{{influxdb_ip_address}}"
- username: "{{influxdb_username}}"
- password: "{{influxdb_password}}"
- database_name: "{{influxdb_database_name}}"
- ssl: yes
- validate_certs: yes
-'''
-
-RETURN = r'''
-# only defaults
-'''
-
-try:
- import requests.exceptions
- from influxdb import exceptions
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
-
-
-def find_database(module, client, database_name):
- database = None
-
- try:
- databases = client.get_list_database()
- for db in databases:
- if db['name'] == database_name:
- database = db
- break
- except requests.exceptions.ConnectionError as e:
- module.fail_json(msg=str(e))
- return database
-
-
-def create_database(module, client, database_name):
- if not module.check_mode:
- try:
- client.create_database(database_name)
- except requests.exceptions.ConnectionError as e:
- module.fail_json(msg=str(e))
-
- module.exit_json(changed=True)
-
-
-def drop_database(module, client, database_name):
- if not module.check_mode:
- try:
- client.drop_database(database_name)
- except exceptions.InfluxDBClientError as e:
- module.fail_json(msg=e.content)
-
- module.exit_json(changed=True)
-
-
-def main():
- argument_spec = InfluxDb.influxdb_argument_spec()
- argument_spec.update(
- database_name=dict(required=True, type='str'),
- state=dict(default='present', type='str', choices=['present', 'absent'])
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- state = module.params['state']
-
- influxdb = InfluxDb(module)
- client = influxdb.connect_to_influxdb()
- database_name = influxdb.database_name
- database = find_database(module, client, database_name)
-
- if state == 'present':
- if database:
- module.exit_json(changed=False)
- else:
- create_database(module, client, database_name)
-
- if state == 'absent':
- if database:
- drop_database(module, client, database_name)
- else:
- module.exit_json(changed=False)
-
-
-if __name__ == '__main__':
- main()
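Without the module wrapper, the ensure-present logic is a few influxdb-python calls; a sketch, assuming InfluxDB 1.x on localhost and a hypothetical database named mydb:

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086)
names = [db['name'] for db in client.get_list_database()]
if 'mydb' not in names:
    client.create_database('mydb')   # state=present
# client.drop_database('mydb')       # state=absent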
diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
deleted file mode 100644
index bff6fa98..00000000
--- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_query.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017, René Moser
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
----
-module: influxdb_query
-short_description: Query data points from InfluxDB
-description:
- - Query data points from InfluxDB.
-author: "René Moser (@resmo)"
-requirements:
- - "python >= 2.6"
- - "influxdb >= 0.9"
-options:
- query:
- description:
- - Query to be executed.
- required: true
- type: str
- database_name:
- description:
- - Name of the database.
- required: true
- type: str
-extends_documentation_fragment:
-- community.general.influxdb
-
-'''
-
-EXAMPLES = r'''
-- name: Query connections
- community.general.influxdb_query:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- query: "select mean(value) from connections"
- register: connection
-
-- name: Query connections with tags filters
- community.general.influxdb_query:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- query: "select mean(value) from connections where region='zue01' and host='server01'"
- register: connection
-
-- name: Print results from the query
- ansible.builtin.debug:
- var: connection.query_results
-'''
-
-RETURN = r'''
-query_results:
- description: Result from the query
- returned: success
- type: list
- sample:
- - mean: 1245.5333333333333
- time: "1970-01-01T00:00:00Z"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.common.text.converters import to_native
-from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
-
-
-class AnsibleInfluxDBRead(InfluxDb):
-
- def read_by_query(self, query):
- client = self.connect_to_influxdb()
- try:
- rs = client.query(query)
- if rs:
- return list(rs.get_points())
- except Exception as e:
- self.module.fail_json(msg=to_native(e))
-
-
-def main():
- argument_spec = InfluxDb.influxdb_argument_spec()
- argument_spec.update(
- query=dict(type='str', required=True),
- database_name=dict(required=True, type='str'),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- influx = AnsibleInfluxDBRead(module)
- query = module.params.get('query')
- results = influx.read_by_query(query)
- module.exit_json(changed=True, query_results=results)
-
-
-if __name__ == '__main__':
- main()
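The query path is equally small; a sketch against the same assumed server:

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='mydb')
rs = client.query('select mean(value) from connections')
points = list(rs.get_points())       # e.g. [{'time': ..., 'mean': ...}]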
diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
deleted file mode 100644
index 6cb45229..00000000
--- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_retention_policy.py
+++ /dev/null
@@ -1,343 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Kamil Szczygiel
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-DOCUMENTATION = r'''
----
-module: influxdb_retention_policy
-short_description: Manage InfluxDB retention policies
-description:
- - Manage InfluxDB retention policies.
-author: "Kamil Szczygiel (@kamsz)"
-requirements:
- - "python >= 2.6"
- - "influxdb >= 0.9"
- - requests
-options:
- database_name:
- description:
- - Name of the database.
- required: true
- type: str
- policy_name:
- description:
- - Name of the retention policy.
- required: true
- type: str
- state:
- description:
- - State of the retention policy.
- choices: [ absent, present ]
- default: present
- type: str
- version_added: 3.1.0
- duration:
- description:
- - Determines how long InfluxDB should keep the data. If specified, it
- should be C(INF) or at least one hour. If not specified, C(INF) is
- assumed. Supports complex duration expressions with multiple units.
- - Required only if I(state) is set to C(present).
- type: str
- replication:
- description:
- - Determines how many independent copies of each point are stored in the cluster.
- - Required only if I(state) is set to C(present).
- type: int
- default:
- description:
- - Sets the retention policy as default retention policy.
- type: bool
- default: false
- shard_group_duration:
- description:
- - Determines the time range covered by a shard group. If specified, it
- must be at least one hour. If not specified, it is determined by InfluxDB
- based on the retention policy's duration. Supports complex duration
- expressions with multiple units.
- type: str
- version_added: '2.0.0'
-extends_documentation_fragment:
-- community.general.influxdb
-
-'''
-
-EXAMPLES = r'''
-# Example influxdb_retention_policy command from Ansible Playbooks
-- name: Create 1 hour retention policy
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 1h
- replication: 1
- ssl: yes
- validate_certs: yes
- state: present
-
-- name: Create 1 day retention policy with 1 hour shard group duration
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 1d
- replication: 1
- shard_group_duration: 1h
- state: present
-
-- name: Create 1 week retention policy with 1 day shard group duration
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 1w
- replication: 1
- shard_group_duration: 1d
- state: present
-
-- name: Create infinite retention policy with 1 week of shard group duration
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: INF
- replication: 1
- ssl: no
- validate_certs: no
- shard_group_duration: 1w
- state: present
-
-- name: Create retention policy with complex durations
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 5d1h30m
- replication: 1
- ssl: no
- validate_certs: no
- shard_group_duration: 1d10h30m
- state: present
-
-- name: Drop retention policy
- community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- state: absent
-'''
-
-RETURN = r'''
-# only defaults
-'''
-
-import re
-
-try:
- import requests.exceptions
- from influxdb import exceptions
-except ImportError:
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
-from ansible.module_utils.common.text.converters import to_native
-
-
-VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$')
-
-DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)')
-EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))')
-
-DURATION_UNIT_NANOSECS = {
- 'ns': 1,
- 'u': 1000,
- 'µ': 1000,
- 'ms': 1000 * 1000,
- 's': 1000 * 1000 * 1000,
- 'm': 1000 * 1000 * 1000 * 60,
- 'h': 1000 * 1000 * 1000 * 60 * 60,
- 'd': 1000 * 1000 * 1000 * 60 * 60 * 24,
- 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7,
-}
-
-MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
-MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
-
-
-def check_duration_literal(value):
- return VALID_DURATION_REGEX.search(value) is not None
-
-
-def parse_duration_literal(value, extended=False):
- duration = 0.0
-
- if value == "INF":
- return duration
-
- lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value)
-
- for duration_literal in lookup:
- filtered_literal = list(filter(None, duration_literal))
- duration_val = float(filtered_literal[0])
- duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]]
-
- return duration
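
Two behaviors of this parser are easy to miss: C(INF) parses to 0, which is why the callers below treat a zero result as infinite rather than as below the one-hour minimum, and a compound literal is simply the sum of its parts. A doctest-style sketch, assuming the module's own parse_duration_literal is in scope:

    # "5d1h30m" (as in the EXAMPLES above) sums to
    # (5*24*3600 + 3600 + 30*60) seconds, expressed in nanoseconds.
    assert parse_duration_literal("5d1h30m") == 437400 * 10**9
    # "INF" maps to 0, the sentinel for infinite retention.
    assert parse_duration_literal("INF") == 0.0
    # extended=True additionally accepts fractional seconds, which the
    # Go-style duration strings reported back by InfluxDB may contain.
    assert parse_duration_literal("1m30.5s", extended=True) == 90.5 * 10**9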
-
-
-def find_retention_policy(module, client):
- database_name = module.params['database_name']
- policy_name = module.params['policy_name']
- hostname = module.params['hostname']
- retention_policy = None
-
- try:
- retention_policies = client.get_list_retention_policies(database=database_name)
- for policy in retention_policies:
- if policy['name'] == policy_name:
- retention_policy = policy
- break
- except requests.exceptions.ConnectionError as e:
- module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
-
- if retention_policy is not None:
- retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True)
- retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True)
-
- return retention_policy
-
-
-def create_retention_policy(module, client):
- database_name = module.params['database_name']
- policy_name = module.params['policy_name']
- duration = module.params['duration']
- replication = module.params['replication']
- default = module.params['default']
- shard_group_duration = module.params['shard_group_duration']
-
- if not check_duration_literal(duration):
- module.fail_json(msg="Failed to parse value of duration")
-
- influxdb_duration_format = parse_duration_literal(duration)
- if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
- module.fail_json(msg="duration value must be at least 1h")
-
- if shard_group_duration is not None:
- if not check_duration_literal(shard_group_duration):
- module.fail_json(msg="Failed to parse value of shard_group_duration")
-
- influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
- if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
- module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
-
- if not module.check_mode:
- try:
- if shard_group_duration:
- client.create_retention_policy(policy_name, duration, replication, database_name, default,
- shard_group_duration)
- else:
- client.create_retention_policy(policy_name, duration, replication, database_name, default)
- except exceptions.InfluxDBClientError as e:
- module.fail_json(msg=e.content)
- module.exit_json(changed=True)
-
-
-def alter_retention_policy(module, client, retention_policy):
- database_name = module.params['database_name']
- policy_name = module.params['policy_name']
- duration = module.params['duration']
- replication = module.params['replication']
- default = module.params['default']
- shard_group_duration = module.params['shard_group_duration']
-
- changed = False
-
- if not check_duration_literal(duration):
- module.fail_json(msg="Failed to parse value of duration")
-
- influxdb_duration_format = parse_duration_literal(duration)
- if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
- module.fail_json(msg="duration value must be at least 1h")
-
- if shard_group_duration is None:
- influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"]
- else:
- if not check_duration_literal(shard_group_duration):
- module.fail_json(msg="Failed to parse value of shard_group_duration")
-
- influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
- if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
- module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
-
- if (retention_policy['duration'] != influxdb_duration_format or
- retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or
- retention_policy['replicaN'] != int(replication) or
- retention_policy['default'] != default):
- if not module.check_mode:
- try:
- client.alter_retention_policy(policy_name, database_name, duration, replication, default,
- shard_group_duration)
- except exceptions.InfluxDBClientError as e:
- module.fail_json(msg=e.content)
- changed = True
- module.exit_json(changed=changed)
-
-
-def drop_retention_policy(module, client):
- database_name = module.params['database_name']
- policy_name = module.params['policy_name']
-
- if not module.check_mode:
- try:
- client.drop_retention_policy(policy_name, database_name)
- except exceptions.InfluxDBClientError as e:
- module.fail_json(msg=e.content)
- module.exit_json(changed=True)
-
-
-def main():
- argument_spec = InfluxDb.influxdb_argument_spec()
- argument_spec.update(
- state=dict(default='present', type='str', choices=['present', 'absent']),
- database_name=dict(required=True, type='str'),
- policy_name=dict(required=True, type='str'),
- duration=dict(type='str'),
- replication=dict(type='int'),
- default=dict(default=False, type='bool'),
- shard_group_duration=dict(type='str'),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=(
- ('state', 'present', ['duration', 'replication']),
- ),
- )
-
- state = module.params['state']
-
- influxdb = InfluxDb(module)
- client = influxdb.connect_to_influxdb()
-
- retention_policy = find_retention_policy(module, client)
-
- if state == 'present':
- if retention_policy:
- alter_retention_policy(module, client, retention_policy)
- else:
- create_retention_policy(module, client)
-
- if state == 'absent':
- if retention_policy:
- drop_retention_policy(module, client)
- else:
- module.exit_json(changed=False)
-
-
-if __name__ == '__main__':
- main()
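
The idempotency check in alter_retention_policy above hinges on normalizing both sides of the comparison to nanoseconds: InfluxDB reports stored durations in Go-style notation (for example 24h0m0s, an assumption based on InfluxDB 1.x behavior), while playbooks supply literals such as 1d, so comparing the raw strings would flag a change on every run. A minimal sketch reusing the module's parse_duration_literal:

    # What the playbook asked for, parsed with the strict grammar.
    desired = parse_duration_literal("1d")
    # What find_retention_policy read back from the server, parsed with
    # extended=True to accept the Go-style "24h0m0s" form.
    current = parse_duration_literal("24h0m0s", extended=True)
    # Both normalize to 86400 * 10**9 ns, so no ALTER statement is issued.
    assert desired == current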
diff --git a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py b/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
deleted file mode 100644
index 76524d86..00000000
--- a/ansible_collections/community/general/plugins/modules/database/influxdb/influxdb_user.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Vitaliy Zhhuta
-# inspired by Kamil Szczygiel