Browse Source

Merge remote-tracking branch 'origin/master' into constellix-improvements

pull/616/head
apatserkovskyi 4 years ago
parent
commit
c55264e657
143 changed files with 12948 additions and 784 deletions
  1. +6
    -6
      .git_hooks_pre-commit
  2. +3
    -0
      .github/FUNDING.yml
  3. +2
    -1
      .github/workflows/main.yml
  4. +2
    -1
      .gitignore
  5. +104
    -6
      CHANGELOG.md
  6. +1
    -1
      CONTRIBUTING.md
  7. +8
    -4
      MANIFEST.in
  8. +87
    -19
      README.md
  9. +1
    -1
      docs/geo_records.md
  10. +56
    -1
      docs/records.md
  11. +1
    -1
      octodns/__init__.py
  12. +1
    -2
      octodns/cmds/report.py
  13. +180
    -28
      octodns/manager.py
  14. +6
    -0
      octodns/processor/__init__.py
  15. +61
    -0
      octodns/processor/acme.py
  16. +69
    -0
      octodns/processor/base.py
  17. +42
    -0
      octodns/processor/filter.py
  18. +100
    -0
      octodns/processor/ownership.py
  19. +8
    -0
      octodns/provider/__init__.py
  20. +946
    -92
      octodns/provider/azuredns.py
  21. +60
    -3
      octodns/provider/base.py
  22. +246
    -43
      octodns/provider/cloudflare.py
  23. +2
    -7
      octodns/provider/constellix.py
  24. +7
    -2
      octodns/provider/digitalocean.py
  25. +43
    -6
      octodns/provider/dnsimple.py
  26. +26
    -1
      octodns/provider/dnsmadeeasy.py
  27. +6
    -6
      octodns/provider/dyn.py
  28. +3
    -2
      octodns/provider/easydns.py
  29. +2
    -1
      octodns/provider/edgedns.py
  30. +379
    -0
      octodns/provider/gandi.py
  31. +624
    -0
      octodns/provider/gcore.py
  32. +340
    -0
      octodns/provider/hetzner.py
  33. +6
    -5
      octodns/provider/mythicbeasts.py
  34. +289
    -96
      octodns/provider/ns1.py
  35. +7
    -3
      octodns/provider/ovh.py
  36. +1
    -1
      octodns/provider/plan.py
  37. +61
    -3
      octodns/provider/powerdns.py
  38. +51
    -7
      octodns/provider/route53.py
  39. +2
    -1
      octodns/provider/selectel.py
  40. +4
    -3
      octodns/provider/transip.py
  41. +34
    -13
      octodns/provider/ultra.py
  42. +8
    -4
      octodns/provider/yaml.py
  43. +372
    -20
      octodns/record/__init__.py
  44. +57
    -7
      octodns/source/axfr.py
  45. +2
    -0
      octodns/source/base.py
  46. +53
    -2
      octodns/zone.py
  47. +1
    -1
      requirements-dev.txt
  48. +6
    -3
      requirements.txt
  49. +5
    -1
      script/coverage
  50. +4
    -0
      script/test
  51. +2
    -1
      setup.py
  52. +21
    -0
      tests/config/alias-zone-loop.yaml
  53. +23
    -0
      tests/config/dynamic.tests.yaml
  54. +6
    -0
      tests/config/plan-output-filehandle.yaml
  55. +23
    -0
      tests/config/processors-missing-class.yaml
  56. +25
    -0
      tests/config/processors-wants-config.yaml
  57. +33
    -0
      tests/config/processors.yaml
  58. +19
    -0
      tests/config/simple-alias-zone.yaml
  59. +3
    -0
      tests/config/simple-split.yaml
  60. +0
    -0
      tests/config/split/dynamic.tests.tst/a.yaml
  61. +0
    -0
      tests/config/split/dynamic.tests.tst/aaaa.yaml
  62. +0
    -0
      tests/config/split/dynamic.tests.tst/cname.yaml
  63. +0
    -0
      tests/config/split/dynamic.tests.tst/real-ish-a.yaml
  64. +0
    -0
      tests/config/split/dynamic.tests.tst/simple-weighted.yaml
  65. +0
    -0
      tests/config/split/empty.tst/.gitkeep
  66. +0
    -0
      tests/config/split/subzone.unit.tests.tst/12.yaml
  67. +0
    -0
      tests/config/split/subzone.unit.tests.tst/2.yaml
  68. +0
    -0
      tests/config/split/subzone.unit.tests.tst/test.yaml
  69. +0
    -0
      tests/config/split/unit.tests.tst/$unit.tests.yaml
  70. +0
    -0
      tests/config/split/unit.tests.tst/_srv._tcp.yaml
  71. +0
    -0
      tests/config/split/unit.tests.tst/aaaa.yaml
  72. +0
    -0
      tests/config/split/unit.tests.tst/cname.yaml
  73. +5
    -0
      tests/config/split/unit.tests.tst/dname.yaml
  74. +0
    -0
      tests/config/split/unit.tests.tst/excluded.yaml
  75. +0
    -0
      tests/config/split/unit.tests.tst/ignored.yaml
  76. +0
    -0
      tests/config/split/unit.tests.tst/included.yaml
  77. +0
    -0
      tests/config/split/unit.tests.tst/mx.yaml
  78. +0
    -0
      tests/config/split/unit.tests.tst/naptr.yaml
  79. +1
    -1
      tests/config/split/unit.tests.tst/ptr.yaml
  80. +0
    -0
      tests/config/split/unit.tests.tst/spf.yaml
  81. +0
    -0
      tests/config/split/unit.tests.tst/sub.yaml
  82. +0
    -0
      tests/config/split/unit.tests.tst/txt.yaml
  83. +15
    -0
      tests/config/split/unit.tests.tst/urlfwd.yaml
  84. +0
    -0
      tests/config/split/unit.tests.tst/www.sub.yaml
  85. +0
    -0
      tests/config/split/unit.tests.tst/www.yaml
  86. +0
    -0
      tests/config/split/unordered.tst/abc.yaml
  87. +0
    -0
      tests/config/split/unordered.tst/xyz.yaml
  88. +63
    -1
      tests/config/unit.tests.yaml
  89. +17
    -0
      tests/config/unknown-processor.yaml
  90. +18
    -0
      tests/config/unknown-source-zone.yaml
  91. +16
    -16
      tests/fixtures/cloudflare-dns_records-page-2.json
  92. +128
    -0
      tests/fixtures/cloudflare-dns_records-page-3.json
  93. +103
    -0
      tests/fixtures/cloudflare-pagerules.json
  94. +56
    -37
      tests/fixtures/constellix-records.json
  95. +22
    -0
      tests/fixtures/digitalocean-page-2.json
  96. +0
    -14
      tests/fixtures/dnsmadeeasy-records.json
  97. +24
    -2
      tests/fixtures/easydns-records.json
  98. +18
    -2
      tests/fixtures/edgedns-records.json
  99. +154
    -0
      tests/fixtures/gandi-no-changes.json
  100. +111
    -0
      tests/fixtures/gandi-records.json

+ 6
- 6
.git_hooks_pre-commit View File

@ -2,10 +2,10 @@
set -e
HOOKS=`dirname $0`
GIT=`dirname $HOOKS`
ROOT=`dirname $GIT`
HOOKS=$(dirname "$0")
GIT=$(dirname "$HOOKS")
ROOT=$(dirname "$GIT")
. $ROOT/env/bin/activate
$ROOT/script/lint
$ROOT/script/coverage
. "$ROOT/env/bin/activate"
"$ROOT/script/lint"
"$ROOT/script/coverage"

+ 3
- 0
.github/FUNDING.yml View File

@ -0,0 +1,3 @@
# These are supported funding model platforms
github: ross

+ 2
- 1
.github/workflows/main.yml View File

@ -6,7 +6,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [2.7, 3.7]
# Tested versions based on dates in https://devguide.python.org/devcycle/#end-of-life-branches,
python-version: [3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@master
- name: Setup python


+ 2
- 1
.gitignore View File

@ -5,8 +5,8 @@
*.pyc
.coverage
.env
/config/
/build/
/config/
coverage.xml
dist/
env/
@ -14,4 +14,5 @@ htmlcov/
nosetests.xml
octodns.egg-info/
output/
tests/zones/unit.tests.
tmp/

+ 104
- 6
CHANGELOG.md View File

@ -1,6 +1,104 @@
## v0.9.11 - 2020-??-?? - ???????????????
* Added support for TCP health checking to dynamic records
## v0.9.14 - 2021-??-?? - A new supports system
#### Noteworthy changes
* Provider `strict_supports` param added, currently defaults to `false`, along
with Provider._process_desired_zone this forms the foundations of a new
"supports" system where providers will warn or error (depending on the value
of `strict_supports`) during planning about their inability to do what
they're being asked. When `false` they will warn and "adjust" the desired
records. When `true` they will abort with an error indicating the problem. Over
time it is expected that all "supports" checking/handling will move into this
paradigm and `strict_supports` will likely be changed to default to `true`.
* Zone shallow copy support, reworking of Processors (alpha) semantics
* NS1 NA target now includes `SX` and `UM`. If `NA` continent is in use in
dynamic records care must be taken to upgrade/downgrade to v0.9.13.
* Ns1Provider now supports a new parameter, shared_notifylist, which results in
all dynamic record monitors using a shared notify list named 'octoDNS NS1
Notify List'. Only newly created record values will use the shared notify
list. It should be safe to enable this functionality, but existing records
will not be converted. Note: Once this option is enabled downgrades to
previous versions of octoDNS are discouraged and may result in undefined
behavior and broken records. See https://github.com/octodns/octodns/pull/749
for related discussion.
## v0.9.13 - 2021-07-18 - Processors Alpha
#### Noteworthy changes
* Alpha support for Processors has been added. Processors allow for hooking
into the source, target, and planning process to make nearly arbitrary changes
to data. See the [octodns/processor/](/octodns/processor) directory for
examples. The change has been designed to have no impact on the process
unless the `processors` key is present in zone configs.
* Fixes NS1 provider's geotarget limitation of using `NA` continent. Now, when
`NA` is used in geos it considers **all** the countries of `North America`
instead of just `us-east`, `us-west` and `us-central` regions
* `SX` & `UM` country support added to NS1Provider, not yet in the North
America list for backwards compatibility reasons. They will be added in the
next release.
#### Stuff
* Lots of progress on the partial/beta support for dynamic records in Azure,
still not production ready.
* NS1 fix for when a pool only exists as a fallback
* Zone level lenient flag
* Validate weight makes sense for pools with a single record
* UltraDNS support for aliases and general fixes/improvements
* Misc doc fixes and improvements
## v0.9.12 - 2021-04-30 - Enough time has passed
#### Noteworthy changes
* Formal Python 2.7 support removed, deps and tooling were becoming
unmaintainable
* octodns/octodns move, from github/octodns, more to come
#### Stuff
* ZoneFileSource supports specifying an extension & no files end in . to better
support Windows
* LOC record type support added
* Support for pre-release versions of PowerDNS
* PowerDNS delete before create which allows A <-> CNAME etc.
* Improved validation of fqdn's in ALIAS, CNAME, etc.
* Transip support for NS records
* Support for sending plan output to a file
* DNSimple uses zone api rather than domain to support non-registered stuff,
e.g. reverse zones.
* Support for fallback-only dynamic pools and related fixes to NS1 provider
* Initial Hetzner provider
## v0.9.11 - 2020-11-05 - We still don't know edition
#### Noteworthy changes
* ALIAS records only allowed at the root of zones - see `lenient` in record docs
for work-arounds if you really need them.
#### New Providers
* Gandi LiveDNS
* UltraDNS
* easyDNS
#### Stuff
* Add support for zones aliases
* octodns-compare: Prefix filtering and status code on mismatch
* Implement octodns-sync --source
* Adding environment variable record injection
* Add support for wildcard SRV records, as shown in RFC 2782
* Add healthcheck option 'request_interval' for Route53 provider
* NS1 georegion, country, and catchall need to be separate groups
* Add the ability to mark a zone as lenient
* Add support for geo-targeting of CA provinces
* Update geo_data to pick up a couple renames
* Cloudflare: Add PTR Support, update rate-limit handling and pagination
* Support PowerDNS 4.3.x
* Added support for TCP health checking of dynamic records
## v0.9.10 - 2020-04-20 - Dynamic NS1 and lots of misc
@ -30,7 +128,7 @@
* Explicit ordering of changes by (name, type) to address inconsistent
ordering for a number of providers that just convert changes into API
calls as they come. Python 2 sets ordered consistently, Python 3 they do
not. https://github.com/github/octodns/pull/384/commits/7958233fccf9ea22d95e2fd06c48d7d0a4529e26
not. https://github.com/octodns/octodns/pull/384/commits/7958233fccf9ea22d95e2fd06c48d7d0a4529e26
* Route53 `_mod_keyer` ordering wasn't 100% complete and thus unreliable and
random in Python 3. This has been addressed and may result in value
reordering on next plan, no actual changes in behavior should occur.
@ -127,10 +225,10 @@ recreating all health checks. This process has been tested pretty thoroughly to
try and ensure a seamless upgrade without any traffic shifting around. It's
probably best to take extra care when updating and to try and make sure that
all health checks are passing before the first sync with `--doit`. See
[#67](https://github.com/github/octodns/pull/67) for more information.
[#67](https://github.com/octodns/octodns/pull/67) for more information.
* Major update to geo healthchecks to allow configuring host (header), path,
protocol, and port [#67](https://github.com/github/octodns/pull/67)
protocol, and port [#67](https://github.com/octodns/octodns/pull/67)
* SSHFP algorithm type 4
* NS1 and DNSimple support skipping unsupported record types
* Revert back to old style setup.py &amp; requirements.txt, setup.cfg was


+ 1
- 1
CONTRIBUTING.md View File

@ -4,7 +4,7 @@ Hi there! We're thrilled that you'd like to contribute to OctoDNS. Your help is
Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
If you have questions, or you'd like to check with us before embarking on a major development effort, please [open an issue](https://github.com/github/octodns/issues/new).
If you have questions, or you'd like to check with us before embarking on a major development effort, please [open an issue](https://github.com/octodns/octodns/issues/new).
## How to contribute


+ 8
- 4
MANIFEST.in View File

@ -1,7 +1,11 @@
include README.md
include CHANGELOG.md
include CODE_OF_CONDUCT.md
include CONTRIBUTING.md
include LICENSE
include docs/*
include octodns/*
include README.md
include requirements-dev.txt
include requirements.txt
include script/*
include tests/*
recursive-include docs *.png *.md
recursive-include tests *.json *.py *.txt *.yaml
recursive-include tests/zones *

+ 87
- 19
README.md View File

@ -1,4 +1,4 @@
<img src="https://raw.githubusercontent.com/github/octodns/master/docs/logos/octodns-logo.png?" height=251 width=404>
<img src="https://raw.githubusercontent.com/octodns/octodns/master/docs/logos/octodns-logo.png?" alt="OctoDNS Logo" height=251 width=404>
## DNS as code - Tools for managing DNS across multiple providers
@ -28,6 +28,7 @@ It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
- [Dynamic sources](#dynamic-sources)
- [Contributing](#contributing)
- [Getting help](#getting-help)
- [Related Projects & Resources](#related-projects--resources)
- [License](#license)
- [Authors](#authors)
@ -37,7 +38,7 @@ It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
Running through the following commands will install the latest release of OctoDNS and set up a place for your config files to live. To determine if provider specific requirements are necessary see the [Supported providers table](#supported-providers) below.
```
```shell
$ mkdir dns
$ cd dns
$ virtualenv env
@ -47,6 +48,14 @@ $ pip install octodns <provider-specific-requirements>
$ mkdir config
```
#### Installing a specific commit SHA
If you'd like to install a version that has not yet been released in a repeatable/safe manner you can do the following. In general octoDNS is fairly stable in between releases thanks to the plan and apply process, but care should be taken regardless.
```shell
$ pip install -e git+https://git@github.com/octodns/octodns.git@<SHA>#egg=octodns
```
### Config
We start by creating a config file to tell OctoDNS about our providers and the zone(s) we want it to manage. Below we're setting up a `YamlProvider` to source records from our config files and both a `Route53Provider` and `DynProvider` to serve as the targets for those records. You can have any number of zones set up and any number of sources of data and targets for records for each. You can also have multiple config files, that make use of separate accounts and each manage a distinct set of zones. A good example of this might be `./config/staging.yaml` & `./config/production.yaml`. We'll focus on a `config/production.yaml`.
@ -79,6 +88,9 @@ zones:
targets:
- dyn
- route53
example.net.:
alias: example.com.
```
`class` is a special key that tells OctoDNS what python class should be loaded. Any other keys will be passed as configuration values to that provider. In general any sensitive or frequently rotated values should come from environmental variables. When OctoDNS sees a value that starts with `env/` it will look for that value in the process's environment and pass the result along.
@ -87,7 +99,9 @@ Further information can be found in the `docstring` of each source and provider
The `max_workers` key in the `manager` section of the config enables threading to parallelize the planning portion of the sync.
Now that we have something to tell OctoDNS about our providers & zones we need to tell it about or records. We'll keep it simple for now and just create a single `A` record at the top-level of the domain.
In this example, `example.net` is an alias of zone `example.com`, which means they share the same sources and targets. They will therefore have identical records.
Now that we have something to tell OctoDNS about our providers & zones we need to tell it about our records. We'll keep it simple for now and just create a single `A` record at the top-level of the domain.
`config/example.com.yaml`
@ -97,8 +111,8 @@ Now that we have something to tell OctoDNS about our providers & zones we need t
ttl: 60
type: A
values:
- 1.2.3.4
- 1.2.3.5
- 1.2.3.4
- 1.2.3.5
```
Further information can be found in [Records Documentation](/docs/records.md).
@ -107,7 +121,7 @@ Further information can be found in [Records Documentation](/docs/records.md).
We're ready to do a dry-run with our new setup to see what changes it would make. Since we're pretending here we'll act like there are no existing records for `example.com.` in our accounts on either provider.
```
```shell
$ octodns-sync --config-file=./config/production.yaml
...
********************************************************************************
@ -131,7 +145,7 @@ There will be other logging information presented on the screen, but successful
Now it's time to tell OctoDNS to make things happen. We'll invoke it again with the same options and add a `--doit` on the end to tell it this time we actually want it to try and make the specified changes.
```
```shell
$ octodns-sync --config-file=./config/production.yaml --doit
...
```
@ -144,17 +158,17 @@ In the above case we manually ran OctoDNS from the command line. That works and
The first step is to create a PR with your changes.
![](/docs/assets/pr.png)
![GitHub user interface of a pull request](/docs/assets/pr.png)
Assuming the code tests and config validation statuses are green the next step is to do a noop deploy and verify that the changes OctoDNS plans to make are the ones you expect.
![](/docs/assets/noop.png)
![Output of a noop deployment command](/docs/assets/noop.png)
After that comes a set of reviews. One from a teammate who should have full context on what you're trying to accomplish and visibility into the changes you're making to do it. The other is from a member of the team here at GitHub that owns DNS, mostly as a sanity check and to make sure that best practices are being followed. As much of that as possible is baked into `octodns-validate`.
After the reviews it's time to branch deploy the change.
![](/docs/assets/deploy.png)
![Output of a deployment command](/docs/assets/deploy.png)
If that goes smoothly, you again see the expected changes, and verify them with `dig` and/or `octodns-report` you're good to hit the merge button. If there are problems you can quickly do a `.deploy dns/master` to go back to the previous state.
@ -162,7 +176,7 @@ If that goes smoothly, you again see the expected changes, and verify them with
Very few situations will involve starting with a blank slate which is why there's tooling built in to pull existing data out of providers into a matching config file.
```
```shell
$ octodns-dump --config-file=config/production.yaml --output-dir=tmp/ example.com. route53
2017-03-15T13:33:34 INFO Manager __init__: config_file=tmp/production.yaml
2017-03-15T13:33:34 INFO Manager dump: zone=example.com., sources=('route53',)
@ -178,9 +192,9 @@ The above command pulled the existing data out of Route53 and placed the results
| Provider | Requirements | Record Support | Dynamic | Notes |
|--|--|--|--|--|
| [AzureProvider](/octodns/provider/azuredns.py) | azure-mgmt-dns | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | No | |
| [AzureProvider](/octodns/provider/azuredns.py) | azure-identity, azure-mgmt-dns, azure-mgmt-trafficmanager | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | Alpha (A, AAAA, CNAME) | |
| [Akamai](/octodns/provider/edgedns.py) | edgegrid-python | A, AAAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
| [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [ConstellixProvider](/octodns/provider/constellix.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [DigitalOceanProvider](/octodns/provider/digitalocean.py) | | A, AAAA, CAA, CNAME, MX, NS, TXT, SRV | No | CAA tags restricted |
| [DnsMadeEasyProvider](/octodns/provider/dnsmadeeasy.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
@ -189,18 +203,21 @@ The above command pulled the existing data out of Route53 and placed the results
| [EasyDNSProvider](/octodns/provider/easydns.py) | | A, AAAA, CAA, CNAME, MX, NAPTR, NS, SRV, TXT | No | |
| [EtcHostsProvider](/octodns/provider/etc_hosts.py) | | A, AAAA, ALIAS, CNAME | No | |
| [EnvVarSource](/octodns/source/envvar.py) | | TXT | No | read-only environment variable injection |
| [GandiProvider](/octodns/provider/gandi.py) | | A, AAAA, ALIAS, CAA, CNAME, DNAME, MX, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
| [GCoreProvider](/octodns/provider/gcore.py) | | A, AAAA, NS, MX, TXT, SRV, CNAME, PTR | Dynamic | |
| [GoogleCloudProvider](/octodns/provider/googlecloud.py) | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
| [HetznerProvider](/octodns/provider/hetzner.py) | | A, AAAA, CAA, CNAME, MX, NS, SRV, TXT | No | |
| [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | Missing `NA` geo target |
| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | |
| [OVH](/octodns/provider/ovh.py) | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | |
| [PowerDnsProvider](/octodns/provider/powerdns.py) | | All | No | |
| [Rackspace](/octodns/provider/rackspace.py) | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | |
| [Route53](/octodns/provider/route53.py) | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header |
| [Selectel](/octodns/provider/selectel.py) | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | |
| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, SRV, SPF, TXT, SSHFP, CAA | No | |
| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, NS, SRV, SPF, TXT, SSHFP, CAA | No | |
| [UltraDns](/octodns/provider/ultra.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | |
| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [TinyDnsFileSource](/octodns/source/tinydns.py) | | A, CNAME, MX, NS, PTR | No | read-only |
| [YamlProvider](/octodns/provider/yaml.py) | | All | Yes | config |
@ -211,6 +228,32 @@ The above command pulled the existing data out of Route53 and placed the results
* Dnsimple's uses the configured TTL when serving things through the ALIAS, there's also a secondary TXT record created alongside the ALIAS that octoDNS ignores
* octoDNS itself supports non-ASCII character sets, but in testing Cloudflare is the only provider where that is currently functional end-to-end. Others have failures either in the client libraries or API calls
## Compatibilty & Compliance
### `lenient`
`lenient` mostly focuses on the details of `Record`s and standards compliance. When set to `true` octoDNS will allow non-compliant configurations & values where possible. For example CNAME values that don't end with a `.`, label length restrictions, and invalid geo codes on `dynamic` records. When in lenient mode octoDNS will log validation problems at `WARNING` and try and continue with the configuration or source data as it exists. See [Lenience](/docs/records.md#lenience) for more information on the concept and how it can be configured.
### `strict_supports` (Work In Progress)
`strict_supports` is a `Provider` level parameter that comes into play when a provider has been asked to create a record that it is unable to support. The simplest case of this would be record type, e.g. `SSHFP` not being supported by `AzureProvider`. If such a record is passed to an `AzureProvider` as a target the provider will take action based on the `strict_supports`. When `true` it will throw an exception saying that it's unable to create the record, when set to `false` it will log at `WARNING` with information about what it's unable to do and how it is attempting to work around it. Other examples of things that cannot be supported would be `dynamic` records on a provider that only supports simple or the lack of support for specific geos in a provider, e.g. Route53Provider does not support `NA-CA-*`.
It is worth noting that these errors will happen during the plan phase of things so that problems will be visible without having to make changes.
This concept is currently a work in progress and only partially implemented. While work is on-going `strict_supports` will default to `false`. Once the work is considered complete & ready the default will change to `true` as it's a much safer and less surprising default: what you configure is what you'll get unless an error is thrown telling you why it cannot be done. You will then have the choice to explicitly request that things continue with work-arounds with `strict_supports` set to `false`. In the meantime it is encouraged that you manually configure the parameter to `true` in your provider configs.
### Configuring `strict_supports`
The `strict_supports` parameter is available on all providers and can be configured in YAML as follows:
```yaml
providers:
someprovider:
class: whatever.TheProvider
...
strict_supports: true
```
## Custom Sources and Providers
You can check out the [source](/octodns/source/) and [provider](/octodns/provider/) directory to see what's currently supported. Sources act as a source of record information. AxfrSource and TinyDnsFileSource are currently the only OSS sources, though we have several others internally that are specific to our environment. These include something to pull host data from [gPanel](https://githubengineering.com/githubs-metal-cloud/) and a similar provider that sources information about our network gear to create both `A` & `PTR` records for their interfaces. Things that might make good OSS sources might include an `ElbSource` that pulls information about [AWS Elastic Load Balancers](https://aws.amazon.com/elasticloadbalancing/) and dynamically creates `CNAME`s for them, or `Ec2Source` that pulls instance information so that records can be created for hosts similar to how our `GPanelProvider` works.
@ -219,6 +262,8 @@ Most of the things included in OctoDNS are providers, the obvious difference bei
The `class` key in the providers config section can be used to point to arbitrary classes in the python path so internal or 3rd party providers can easily be included with no coordination beyond getting them into PYTHONPATH, most likely installed into the virtualenv with OctoDNS.
For examples of building third-party sources and providers, see [Related Projects & Resources](#related-projects--resources).
## Other Uses
### Syncing between providers
@ -278,13 +323,36 @@ Please see our [contributing document](/CONTRIBUTING.md) if you would like to pa
## Getting help
If you have a problem or suggestion, please [open an issue](https://github.com/github/octodns/issues/new) in this repository, and we will do our best to help. Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md).
If you have a problem or suggestion, please [open an issue](https://github.com/octodns/octodns/issues/new) in this repository, and we will do our best to help. Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md).
## Related Projects & Resources
- **GitHub Action:** [OctoDNS-Sync](https://github.com/marketplace/actions/octodns-sync)
- **Sample Implementations.** See how others are using it
- [`hackclub/dns`](https://github.com/hackclub/dns)
- [`kubernetes/k8s.io:/dns`](https://github.com/kubernetes/k8s.io/tree/main/dns)
- [`g0v-network/domains`](https://github.com/g0v-network/domains)
- [`jekyll/dns`](https://github.com/jekyll/dns)
- **Custom Sources & Providers.**
- [`octodns/octodns-ddns`](https://github.com/octodns/octodns-ddns): A simple Dynamic DNS source.
- [`doddo/octodns-lexicon`](https://github.com/doddo/octodns-lexicon): Use [Lexicon](https://github.com/AnalogJ/lexicon) providers as octoDNS providers.
- [`asyncon/octoblox`](https://github.com/asyncon/octoblox): [Infoblox](https://www.infoblox.com/) provider.
- [`sukiyaki/octodns-netbox`](https://github.com/sukiyaki/octodns-netbox): [NetBox](https://github.com/netbox-community/netbox) source.
- [`kompetenzbolzen/octodns-custom-provider`](https://github.com/kompetenzbolzen/octodns-custom-provider): zonefile provider & phpIPAM source.
- **Resources.**
- Article: [Visualising DNS records with Neo4j](https://medium.com/@costask/querying-and-visualising-octodns-records-with-neo4j-f4f72ab2d474) + code
- Video: [FOSDEM 2019 - DNS as code with octodns](https://archive.fosdem.org/2019/schedule/event/dns_octodns/)
- GitHub Blog: [Enabling DNS split authority with OctoDNS](https://github.blog/2017-04-27-enabling-split-authority-dns-with-octodns/)
- Tutorial: [How To Deploy and Manage Your DNS using OctoDNS on Ubuntu 18.04](https://www.digitalocean.com/community/tutorials/how-to-deploy-and-manage-your-dns-using-octodns-on-ubuntu-18-04)
- Cloudflare Blog: [Improving the Resiliency of Our Infrastructure DNS Zone](https://blog.cloudflare.com/improving-the-resiliency-of-our-infrastructure-dns-zone/)
If you know of any other resources, please do let us know!
## License
OctoDNS is licensed under the [MIT license](LICENSE).
The MIT license grant is not for GitHub's trademarks, which include the logo designs. GitHub reserves all trademark and copyright rights in and to all GitHub trademarks. GitHub's logos include, for instance, the stylized designs that include "logo" in the file title in the following folder: https://github.com/github/octodns/tree/master/docs/logos/
The MIT license grant is not for GitHub's trademarks, which include the logo designs. GitHub reserves all trademark and copyright rights in and to all GitHub trademarks. GitHub's logos include, for instance, the stylized designs that include "logo" in the file title in the following folder: https://github.com/octodns/octodns/tree/master/docs/logos/
GitHub® and its stylized versions and the Invertocat mark are GitHub's Trademarks or registered Trademarks. When using GitHub's logos, be sure to follow the GitHub logo guidelines.


+ 1
- 1
docs/geo_records.md View File

@ -1,6 +1,6 @@
## Geo Record Support
Note: Geo DNS records are still supported for the time being, but it is still strongy encouraged that you look at [Dynamic Records](/docs/dynamic_records.md) instead as they are a superset of functionality.
Note: Geo DNS records are still supported for the time being, but it is still strongly encouraged that you look at [Dynamic Records](/docs/dynamic_records.md) instead as they are a superset of functionality.
GeoDNS is currently supported for `A` and `AAAA` records on the Dyn (via Traffic Directors) and Route53 providers. Records with geo information pushed to providers without support for them will be managed as non-geo records using the base values.


+ 56
- 1
docs/records.md View File

@ -6,15 +6,20 @@ OctoDNS supports the following record types:
* `A`
* `AAAA`
* `ALIAS`
* `CAA`
* `CNAME`
* `DNAME`
* `LOC`
* `MX`
* `NAPTR`
* `NS`
* `PTR`
* `SSHFP`
* `SPF`
* `SRV`
* `SSHFP`
* `TXT`
* `URLFWD`
Underlying provider support for each of these varies and some providers have extra requirements or limitations. In cases where a record type is not supported by a provider OctoDNS will ignore it there and continue to manage the record elsewhere. For example `SSHFP` is supported by Dyn, but not Route53. If your source data includes an SSHFP record OctoDNS will keep it in sync on Dyn, but not consider it when evaluating the state of Route53. The best way to find out what types are supported by a provider is to look for its `supports` method. If that method exists the logic will drive which records are supported and which are ignored. If the provider does not implement the method it will fall back to `BaseProvider.supports` which indicates full support.
@ -81,3 +86,53 @@ In the above example each name had a single record, but there are cases where a
Each record type has a corresponding set of required data. The easiest way to determine what's required is probably to look at the record object in [`octodns/record/__init__.py`](/octodns/record/__init__.py). You may also utilize `octodns-validate` which will throw errors about what's missing when run.
`type` is required for all records. `ttl` is optional. When TTL is not specified the `YamlProvider`'s default will be used. In any situation where an array of `values` can be used you can opt to go with `value` as a single item if there's only one.
### Lenience
octoDNS is fairly strict in terms of standards compliance and is opinionated in terms of best practices. Examples of former include SRV record naming requirements and the latter that ALIAS records are constrained to the root of zones. The strictness and support of providers varies so you may encounter existing records that fail validation when you try to dump them or you may even have use cases for which you need to create or preserve records that don't validate. octoDNS's solution to this is the `lenient` flag.
It's best to think of the `lenient` flag as "I know what I'm doing and accept any problems I run across." The main reason is that some providers may allow the non-compliant setup and others may not. The behavior of the non-compliant records may even vary from one provider to another. Caveat emptor.
#### octodns-dump
If you're trying to import a zone into octoDNS config file using `octodns-dump` which fails due to validation errors you can supply the `--lenient` argument to tell octoDNS that you acknowledge that things aren't lining up with its expectations, but you'd like it to go ahead anyway. This will do its best to populate the zone and dump the results out into an octoDNS zone file and include the non-compliant bits. If you go to use that config file octoDNS will again complain about the validation problems. You can correct them in cases where that makes sense, but if you need to preserve the non-compliant records read on for options.
#### Record level lenience
When there are non-compliant records configured in Yaml you can add the following to tell octoDNS to do its best to proceed with them anyway. If you use `--lenient` above to dump a zone and you'd like to sync it as-is you can mark the problematic records this way.
```yaml
'not-root':
octodns:
lenient: true
type: ALIAS
values: something.else.com.
```
#### Zone level lenience
If you'd like to enable lenience for a whole zone you can do so with the following, though it's strongly encouraged to mark things at record level when possible. The most common case where things may need to be done at the zone level is when using something other than `YamlProvider` as a source, e.g. syncing from `Route53Provider` to `Ns1Provider` when there are non-compliant records in the zone in Route53.
```yaml
non-compliant-zone.com.:
lenient: true
sources:
- route53
targets:
- ns1
```
#### Restrict Record manipulations
OctoDNS currently provides the ability to limit the number of updates/deletes on
DNS records by configuring a percentage of allowed operations as a threshold.
If left unconfigured, suitable defaults take over instead. In the below example,
the Dyn provider is configured with limits of 40% on both update and
delete operations over all the records present.
````yaml
dyn:
class: octodns.provider.dyn.DynProvider
update_pcent_threshold: 0.4
delete_pcent_threshold: 0.4
````

+ 1
- 1
octodns/__init__.py View File

@ -3,4 +3,4 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
__VERSION__ = '0.9.10'
__VERSION__ = '0.9.13'

+ 1
- 2
octodns/cmds/report.py View File

@ -17,7 +17,6 @@ from six import text_type
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
from octodns.zone import Zone
class AsyncResolver(Resolver):
@ -56,7 +55,7 @@ def main():
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
zone = Zone(args.zone, manager.configured_sub_zones(args.zone))
zone = manager.get_zone(args.zone)
for source in sources:
source.populate(zone)


+ 180
- 28
octodns/manager.py View File

@ -9,6 +9,7 @@ from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from os import environ
from six import text_type
from sys import stdout
import logging
from .provider.base import BaseProvider
@ -121,6 +122,25 @@ class Manager(object):
raise ManagerException('Incorrect provider config for {}'
.format(provider_name))
self.processors = {}
for processor_name, processor_config in \
self.config.get('processors', {}).items():
try:
_class = processor_config.pop('class')
except KeyError:
self.log.exception('Invalid processor class')
raise ManagerException('Processor {} is missing class'
.format(processor_name))
_class = self._get_named_class('processor', _class)
kwargs = self._build_kwargs(processor_config)
try:
self.processors[processor_name] = _class(processor_name,
**kwargs)
except TypeError:
self.log.exception('Invalid processor config')
raise ManagerException('Incorrect processor config for {}'
.format(processor_name))
zone_tree = {}
# sort by reversed strings so that parent zones always come first
for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
@ -222,21 +242,34 @@ class Manager(object):
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets, lenient=False):
def _populate_and_plan(self, zone_name, processors, sources, targets,
desired=None, lenient=False):
self.log.debug('sync: populating, zone=%s, lenient=%s',
zone_name, lenient)
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
for source in sources:
try:
source.populate(zone, lenient=lenient)
except TypeError as e:
if "keyword argument 'lenient'" not in text_type(e):
raise
self.log.warn(': provider %s does not accept lenient param',
source.__class__.__name__)
source.populate(zone)
if desired:
# This is an alias zone, rather than populate it we'll copy the
# records over from `desired`.
for _, records in desired._records.items():
for record in records:
zone.add_record(record.copy(zone=zone), lenient=lenient)
else:
for source in sources:
try:
source.populate(zone, lenient=lenient)
except TypeError as e:
if ("unexpected keyword argument 'lenient'"
not in text_type(e)):
raise
self.log.warn('provider %s does not accept lenient '
'param', source.__class__.__name__)
source.populate(zone)
for processor in processors:
zone = processor.process_source_zone(zone, sources=sources)
self.log.debug('sync: planning, zone=%s', zone_name)
plans = []
@ -249,25 +282,63 @@ class Manager(object):
'value': 'provider={}'.format(target.id)
})
zone.add_record(meta, replace=True)
plan = target.plan(zone)
try:
plan = target.plan(zone, processors=processors)
except TypeError as e:
if "keyword argument 'processors'" not in text_type(e):
raise
self.log.warn('provider.plan %s does not accept processors '
'param', target.__class__.__name__)
plan = target.plan(zone)
for processor in processors:
plan = processor.process_plan(plan, sources=sources,
target=target)
if plan:
plans.append((target, plan))
return plans
# Return the zone as it's the desired state
return plans, zone
def sync(self, eligible_zones=[], eligible_sources=[], eligible_targets=[],
dry_run=True, force=False):
self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
'dry_run=%s, force=%s', eligible_zones, eligible_targets,
dry_run, force)
dry_run=True, force=False, plan_output_fh=stdout):
self.log.info(
'sync: eligible_zones=%s, eligible_targets=%s, dry_run=%s, '
'force=%s, plan_output_fh=%s',
eligible_zones, eligible_targets, dry_run, force,
getattr(plan_output_fh, 'name', plan_output_fh.__class__.__name__))
zones = self.config['zones'].items()
if eligible_zones:
zones = [z for z in zones if z[0] in eligible_zones]
aliased_zones = {}
futures = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
if 'alias' in config:
source_zone = config['alias']
# Check that the source zone is defined.
if source_zone not in self.config['zones']:
self.log.error('Invalid alias zone {}, target {} does '
'not exist'.format(zone_name, source_zone))
raise ManagerException('Invalid alias zone {}: '
'source zone {} does not exist'
.format(zone_name, source_zone))
# Check that the source zone is not an alias zone itself.
if 'alias' in self.config['zones'][source_zone]:
self.log.error('Invalid alias zone {}, target {} is an '
'alias zone'.format(zone_name, source_zone))
raise ManagerException('Invalid alias zone {}: source '
'zone {} is an alias zone'
.format(zone_name, source_zone))
aliased_zones[zone_name] = source_zone
continue
lenient = config.get('lenient', False)
try:
sources = config['sources']
@ -281,6 +352,8 @@ class Manager(object):
raise ManagerException('Zone {} is missing targets'
.format(zone_name))
processors = config.get('processors', [])
if (eligible_sources and not
[s for s in sources if s in eligible_sources]):
self.log.info('sync: no eligible sources, skipping')
@ -298,6 +371,15 @@ class Manager(object):
self.log.info('sync: sources=%s -> targets=%s', sources, targets)
try:
collected = []
for processor in processors:
collected.append(self.processors[processor])
processors = collected
except KeyError:
raise ManagerException('Zone {}, unknown processor: {}'
.format(zone_name, processor))
try:
# rather than using a list comprehension, we break this loop
# out so that the `except` block below can reference the
@ -324,12 +406,43 @@ class Manager(object):
.format(zone_name, target))
futures.append(self._executor.submit(self._populate_and_plan,
zone_name, sources,
targets, lenient=lenient))
zone_name, processors,
sources, targets,
lenient=lenient))
# Wait on all results and unpack/flatten them in to a list of target &
# plan pairs.
plans = [p for f in futures for p in f.result()]
# Wait on all results and unpack/flatten the plans and store the
# desired states in case we need them below
plans = []
desired = {}
for future in futures:
ps, d = future.result()
desired[d.name] = d
for plan in ps:
plans.append(plan)
# Populate aliases zones.
futures = []
for zone_name, zone_source in aliased_zones.items():
source_config = self.config['zones'][zone_source]
try:
desired_config = desired[zone_source]
except KeyError:
raise ManagerException('Zone {} cannot be sync without zone '
'{} sinced it is aliased'
.format(zone_name, zone_source))
futures.append(self._executor.submit(
self._populate_and_plan,
zone_name,
processors,
[],
[self.providers[t] for t in source_config['targets']],
desired=desired_config,
lenient=lenient
))
# Wait on results and unpack/flatten the plans, ignore the desired here
# as these are aliased zones
plans += [p for f in futures for p in f.result()[0]]
# Best effort sort plans children first so that we create/update
# children zones before parents which should allow us to more safely
@ -339,7 +452,7 @@ class Manager(object):
plans.sort(key=self._plan_keyer, reverse=True)
for output in self.plan_outputs.values():
output.run(plans=plans, log=self.log)
output.run(plans=plans, log=self.log, fh=plan_output_fh)
if not force:
self.log.debug('sync: checking safety')
@ -377,12 +490,11 @@ class Manager(object):
except KeyError as e:
raise ManagerException('Unknown source: {}'.format(e.args[0]))
sub_zones = self.configured_sub_zones(zone)
za = Zone(zone, sub_zones)
za = self.get_zone(zone)
for source in a:
source.populate(za)
zb = Zone(zone, sub_zones)
zb = self.get_zone(zone)
for source in b:
source.populate(zb)
@ -421,6 +533,25 @@ class Manager(object):
for zone_name, config in self.config['zones'].items():
zone = Zone(zone_name, self.configured_sub_zones(zone_name))
source_zone = config.get('alias')
if source_zone:
if source_zone not in self.config['zones']:
self.log.exception('Invalid alias zone')
raise ManagerException('Invalid alias zone {}: '
'source zone {} does not exist'
.format(zone_name, source_zone))
if 'alias' in self.config['zones'][source_zone]:
self.log.exception('Invalid alias zone')
raise ManagerException('Invalid alias zone {}: '
'source zone {} is an alias zone'
.format(zone_name, source_zone))
# this is just here to satisfy coverage, see
# https://github.com/nedbat/coveragepy/issues/198
source_zone = source_zone
continue
try:
sources = config['sources']
except KeyError:
@ -428,9 +559,9 @@ class Manager(object):
.format(zone_name))
try:
# rather than using a list comprehension, we break this loop
# out so that the `except` block below can reference the
# `source`
# rather than using a list comprehension, we break this
# loop out so that the `except` block below can reference
# the `source`
collected = []
for source in sources:
collected.append(self.providers[source])
@ -442,3 +573,24 @@ class Manager(object):
for source in sources:
if isinstance(source, YamlProvider):
source.populate(zone)
# check that processors are in order if any are specified
processors = config.get('processors', [])
try:
# same as above, but for processors this time
for processor in processors:
collected.append(self.processors[processor])
except KeyError:
raise ManagerException('Zone {}, unknown processor: {}'
.format(zone_name, processor))
def get_zone(self, zone_name):
if not zone_name[-1] == '.':
raise ManagerException('Invalid zone name {}, missing ending dot'
.format(zone_name))
for name, config in self.config['zones'].items():
if name == zone_name:
return Zone(name, self.configured_sub_zones(name))
raise ManagerException('Unknown zone name {}'.format(zone_name))

+ 6
- 0
octodns/processor/__init__.py View File

@ -0,0 +1,6 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals

+ 61
- 0
octodns/processor/acme.py View File

@ -0,0 +1,61 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
from .base import BaseProcessor
class AcmeMangingProcessor(BaseProcessor):
    '''
    Tags ACME challenge TXT records that octoDNS manages with a sentinel
    value and prunes unmanaged challenge records from the target state so
    that records created by other actors (e.g. cert bots writing directly
    to the provider) are left untouched.

    processors:
      acme:
        class: octodns.processor.acme.AcmeMangingProcessor
        ...

    zones:
      something.com.:
        ...
        processors:
          - acme
        ...
    '''
    log = getLogger('AcmeMangingProcessor')

    # Sentinel TXT value used to tag challenge records owned by octoDNS.
    _MARKER = '*octoDNS*'

    def __init__(self, name):
        super(AcmeMangingProcessor, self).__init__(name)
        # Challenge records seen while processing sources; filled in by
        # process_source_zone and consulted by process_target_zone.
        self._owned = set()

    def _is_challenge(self, record):
        # startswith rather than == so subdomain challenges match too,
        # e.g. _acme-challenge.foo.domain.com when managing domain.com
        return record._type == 'TXT' and \
            record.name.startswith('_acme-challenge')

    def process_source_zone(self, desired, *args, **kwargs):
        for record in desired.records:
            if not self._is_challenge(record):
                continue
            # A managed acme challenge record (owned by octoDNS), mark it
            # as such with the sentinel value.
            tagged = record.copy()
            tagged.values.append(self._MARKER)
            tagged.values.sort()
            # This assumes we'll see things as sources before targets,
            # which is the case...
            self._owned.add(tagged)
            desired.add_record(tagged, replace=True)
        return desired

    def process_target_zone(self, existing, *args, **kwargs):
        for record in existing.records:
            if self._is_challenge(record) and \
               self._MARKER not in record.values and \
               record not in self._owned:
                # Challenge record without our marker that we don't own,
                # leave it alone by dropping it from the existing state.
                self.log.info('_process: ignoring %s', record.fqdn)
                existing.remove_record(record)
        return existing

+ 69
- 0
octodns/processor/base.py View File

@ -0,0 +1,69 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
class BaseProcessor(object):
    '''
    Base class for octoDNS processors. Subclasses hook into the sync
    pipeline by overriding any of the methods below; the defaults are
    no-ops that return their input unchanged.
    '''

    def __init__(self, name):
        # The config key under `processors:` this instance was built for.
        self.name = name

    def process_source_zone(self, desired, sources):
        '''
        Called once all sources have finished populate. Gives the
        processor a chance to alter the desired `Zone` before targets
        receive it.

        - Sees `desired` after any changes made by
          `Provider._process_desired_zone` and by processors configured
          to run earlier in the chain.
        - May change `desired` in place.
        - Must return `desired`, normally the same object passed in.
        - Records themselves must not be mutated; call `record.copy`,
          modify the copy, and re-add it via `Zone.add_record` with
          `replace=True`.
        - `Zone.remove_record` may be used to drop records from
          `desired`.
        - `sources` can be empty, as it is for aliased zones.
        '''
        return desired

    def process_target_zone(self, existing, target):
        '''
        Called after a target finishes `populate`, before changes between
        `existing` and `desired` are computed. Gives the processor a
        chance to alter the existing `Zone`.

        - Sees `existing` after any modifications made by processors
          configured to run earlier in the chain.
        - May change `existing` in place.
        - Must return `existing`, normally the same object passed in.
        - Records themselves must not be mutated; call `record.copy`,
          modify the copy, and re-add it via `Zone.add_record` with
          `replace=True`.
        - `Zone.remove_record` may be used to drop records from
          `existing`.
        '''
        return existing

    def process_plan(self, plan, sources, target):
        '''
        Called once the planning phase has completed. Gives the processor
        a chance to adjust the plan, thereby changing the actions that
        will be displayed and potentially applied.

        - `plan` may be None when no changes were detected; a `Plan` may
          still be created and returned in that case.
        - `plan.changes` may be modified directly, or a new `Plan`
          created instead.
        - `plan.desired` and/or `plan.existing` need not be kept in sync
          with any modifications made to `plan.changes`.
        - A newly created `Plan` should carry over `plan.exists`,
          `plan.update_pcent_threshold`, and
          `plan.delete_pcent_threshold`.
        - Must return a `Plan`, either `plan` itself or a new one with
          `plan.desired` and `plan.existing` copied over as-is or
          modified.
        - `sources` can be empty, as it is for aliased zones.
        '''
        return plan

+ 42
- 0
octodns/processor/filter.py View File

@ -0,0 +1,42 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from .base import BaseProcessor
class TypeAllowlistFilter(BaseProcessor):
    '''
    Drops every record whose type is not in `allowlist`, on both the
    source (desired) and target (existing) side, so that only the listed
    record types are ever managed.
    '''

    def __init__(self, name, allowlist):
        super(TypeAllowlistFilter, self).__init__(name)
        # set() gives O(1) membership checks and de-dupes the config list
        self.allowlist = set(allowlist)

    def _process(self, zone, *args, **kwargs):
        # Collect first, then remove, so we never mutate while iterating.
        unwanted = [r for r in zone.records if r._type not in self.allowlist]
        for record in unwanted:
            zone.remove_record(record)
        return zone

    process_source_zone = _process
    process_target_zone = _process
class TypeRejectlistFilter(BaseProcessor):
    '''
    Drops every record whose type is in `rejectlist`, on both the source
    (desired) and target (existing) side, so that the listed record
    types are never managed.
    '''

    def __init__(self, name, rejectlist):
        super(TypeRejectlistFilter, self).__init__(name)
        # set() gives O(1) membership checks and de-dupes the config list
        self.rejectlist = set(rejectlist)

    def _process(self, zone, *args, **kwargs):
        # Collect first, then remove, so we never mutate while iterating.
        rejected = [r for r in zone.records if r._type in self.rejectlist]
        for record in rejected:
            zone.remove_record(record)
        return zone

    process_source_zone = _process
    process_target_zone = _process

+ 100
- 0
octodns/processor/ownership.py View File

@ -0,0 +1,100 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from ..provider.plan import Plan
from ..record import Record
from .base import BaseProcessor
# Mark anything octoDNS is managing that way it can know it's safe to modify or
# delete. We'll take ownership of existing records that we're told to manage
# and thus "own" them going forward.
class OwnershipProcessor(BaseProcessor):
    '''
    Stamps each record octoDNS manages with a companion ownership TXT
    record so later runs can tell which records are safe to modify or
    delete. Existing records we're told to manage are claimed and thus
    "owned" going forward; changes to records without an ownership
    marker are filtered out of the plan.
    '''

    def __init__(self, name, txt_name='_owner', txt_value='*octodns*'):
        super(OwnershipProcessor, self).__init__(name)
        # Prefix used for the companion ownership TXT record names.
        self.txt_name = txt_name
        # Value stored in the companion ownership TXT records.
        self.txt_value = txt_value
        # Pre-built single-value list for quick equality checks against
        # record.values in _is_ownership.
        self._txt_values = [txt_value]

    def _ownership_name(self, record):
        # Companion TXT name is <txt_name>.<type>[.<record name>], with
        # '*' mapped to '_wildcard' since it isn't valid in a name.
        safe_name = record.name.replace('*', '_wildcard')
        if record.name:
            return '{}.{}.{}'.format(self.txt_name, record._type, safe_name)
        return '{}.{}'.format(self.txt_name, record._type)

    def process_source_zone(self, desired, *args, **kwargs):
        # Create and add an ownership TXT for each configured record.
        for record in desired.records:
            txt = Record.new(desired, self._ownership_name(record), {
                'type': 'TXT',
                'ttl': 60,
                'value': self.txt_value,
            })
            desired.add_record(txt)
        return desired

    def _is_ownership(self, record):
        # An ownership TXT: right type, our name prefix, exactly our
        # value and nothing else.
        return record._type == 'TXT' and \
            record.name.startswith(self.txt_name) and \
            record.values == self._txt_values

    def process_plan(self, plan, *args, **kwargs):
        if not plan:
            # If we don't have any change there's nothing to do
            return plan

        # First find all the ownership info. We need to look in both the
        # desired and existing states; many things will show up in both,
        # but that's fine.
        owned = defaultdict(dict)
        for record in list(plan.existing.records) + list(plan.desired.records):
            if not self._is_ownership(record):
                continue
            pieces = record.name.split('.', 2)
            if len(pieces) > 2:
                _, _type, name = pieces
                name = name.replace('_wildcard', '*')
            else:
                _type = pieces[1]
                name = ''
            owned[name][_type.upper()] = True

        # Cases:
        #  - Configured in source
        #    - We'll fully CRU/manage it, adding the ownership TXT via
        #      process_source_zone if needed
        #  - Not in source
        #    - Has an ownership TXT - delete it & the ownership TXT
        #    - Does not have an ownership TXT - don't delete it
        #  - Special records like octodns-meta
        #    - Should be left alone and should not have ownership TXTs
        filtered_changes = []
        for change in plan.changes:
            record = change.record
            is_special = record.name == 'octodns-meta'
            if self._is_ownership(record) or \
               record._type in owned[record.name] or is_special:
                # An ownership TXT itself, a record we own(ed) up until
                # now, or a special record - keep whatever the change is.
                filtered_changes.append(change)

        if plan.changes != filtered_changes:
            # Some changes were dropped, build a trimmed replacement Plan
            # carrying over the original's flags and thresholds.
            return Plan(plan.existing, plan.desired, filtered_changes,
                        plan.exists, plan.update_pcent_threshold,
                        plan.delete_pcent_threshold)
        return plan

+ 8
- 0
octodns/provider/__init__.py View File

@ -4,3 +4,11 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
class ProviderException(Exception):
    '''Base class for errors raised by octoDNS providers.'''
    pass


class SupportsException(ProviderException):
    '''Raised when a provider cannot support a requested record/feature.

    NOTE(review): appears to be raised in place of a warning when a
    provider is configured with strict_supports (see
    BaseProvider.supports_warn_or_except) -- confirm against
    octodns/provider/base.py.
    '''
    pass

+ 946
- 92
octodns/provider/azuredns.py
File diff suppressed because it is too large
View File


+ 60
- 3
octodns/provider/base.py View File

@ -10,13 +10,15 @@ from six import text_type
from ..source.base import BaseSource
from ..zone import Zone
from .plan import Plan
from . import SupportsException
class BaseProvider(BaseSource):
def __init__(self, id, apply_disabled=False,
update_pcent_threshold=Plan.MAX_SAFE_UPDATE_PCENT,
delete_pcent_threshold=Plan.MAX_SAFE_DELETE_PCENT):
delete_pcent_threshold=Plan.MAX_SAFE_DELETE_PCENT,
strict_supports=False):
super(BaseProvider, self).__init__(id)
self.log.debug('__init__: id=%s, apply_disabled=%s, '
'update_pcent_threshold=%.2f, '
@ -28,6 +30,43 @@ class BaseProvider(BaseSource):
self.apply_disabled = apply_disabled
self.update_pcent_threshold = update_pcent_threshold
self.delete_pcent_threshold = delete_pcent_threshold
self.strict_supports = strict_supports
def _process_desired_zone(self, desired):
'''
An opportunity for providers to modify the desired zone records before
planning. `desired` is a "shallow" copy, see `Zone.copy` for more
information
- Must call `super` at an appropriate point for their work, generally
that means as the final step of the method, returning the result of
the `super` call.
- May modify `desired` directly.
- Must not modify records directly, `record.copy` should be called,
the results of which can be modified, and then `Zone.add_record` may
be used with `replace=True`.
- May call `Zone.remove_record` to remove records from `desired`.
- Must call supports_warn_or_except with information about any changes
that are made to have them logged or throw errors depending on the
provider configuration.
'''
if self.SUPPORTS_MUTLIVALUE_PTR:
# nothing do here
return desired
for record in desired.records:
if record._type == 'PTR' and len(record.values) > 1:
# replace with a single-value copy
msg = 'multi-value PTR records not supported for {}' \
.format(record.fqdn)
fallback = 'falling back to single value, {}' \
.format(record.value)
self.supports_warn_or_except(msg, fallback)
record = record.copy()
record.values = [record.value]
desired.add_record(record, replace=True)
return desired
def _include_change(self, change):
'''
@ -44,9 +83,21 @@ class BaseProvider(BaseSource):
'''
return []
def plan(self, desired):
def supports_warn_or_except(self, msg, fallback):
if self.strict_supports:
raise SupportsException('{}: {}'.format(self.id, msg))
self.log.warning('{}; {}'.format(msg, fallback))
def plan(self, desired, processors=[]):
self.log.info('plan: desired=%s', desired.name)
# Make a (shallow) copy of the desired state so that everything from
# now on (in this target) can modify it as they see fit without
# worrying about impacting other targets.
desired = desired.copy()
desired = self._process_desired_zone(desired)
existing = Zone(desired.name, desired.sub_zones)
exists = self.populate(existing, target=True, lenient=True)
if exists is None:
@ -55,6 +106,9 @@ class BaseProvider(BaseSource):
self.log.warn('Provider %s used in target mode did not return '
'exists', self.id)
for processor in processors:
existing = processor.process_target_zone(existing, target=self)
# compute the changes at the zone/record level
changes = existing.changes(desired, self)
@ -91,7 +145,10 @@ class BaseProvider(BaseSource):
self.log.info('apply: disabled')
return 0
self.log.info('apply: making changes')
zone_name = plan.desired.name
num_changes = len(plan.changes)
self.log.info('apply: making %d changes to %s', num_changes,
zone_name)
self._apply(plan)
return len(plan.changes)


+ 246
- 43
octodns/provider/cloudflare.py View File

@ -10,12 +10,14 @@ from copy import deepcopy
from logging import getLogger
from requests import Session
from time import sleep
from urllib.parse import urlsplit
from ..record import Record, Update
from . import ProviderException
from .base import BaseProvider
class CloudflareError(Exception):
class CloudflareError(ProviderException):
def __init__(self, data):
try:
message = data['errors'][0]['message']
@ -75,8 +77,8 @@ class CloudflareProvider(BaseProvider):
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('ALIAS', 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR',
'SRV', 'SPF', 'TXT'))
SUPPORTS = set(('ALIAS', 'A', 'AAAA', 'CAA', 'CNAME', 'LOC', 'MX', 'NS',
'PTR', 'SRV', 'SPF', 'TXT', 'URLFWD'))
MIN_TTL = 120
TIMEOUT = 15
@ -133,6 +135,7 @@ class CloudflareProvider(BaseProvider):
timeout=self.TIMEOUT)
self.log.debug('_request: status=%d', resp.status_code)
if resp.status_code == 400:
self.log.debug('_request: data=%s', data)
raise CloudflareError(resp.json())
if resp.status_code == 403:
raise CloudflareAuthenticationError(resp.json())
@ -142,6 +145,11 @@ class CloudflareProvider(BaseProvider):
resp.raise_for_status()
return resp.json()
def _change_keyer(self, change):
key = change.__class__.__name__
order = {'Delete': 0, 'Create': 1, 'Update': 2}
return order[key]
@property
def zones(self):
if self._zones is None:
@ -164,6 +172,9 @@ class CloudflareProvider(BaseProvider):
return self._zones
def _ttl_data(self, ttl):
return 300 if ttl == 1 else ttl
def _data_for_cdn(self, name, _type, records):
self.log.info('CDN rewrite for %s', records[0]['name'])
_type = "CNAME"
@ -171,14 +182,14 @@ class CloudflareProvider(BaseProvider):
_type = "ALIAS"
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'value': '{}.cdn.cloudflare.net.'.format(records[0]['name']),
}
def _data_for_multiple(self, _type, records):
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': [r['content'] for r in records],
}
@ -189,7 +200,7 @@ class CloudflareProvider(BaseProvider):
def _data_for_TXT(self, _type, records):
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': [r['content'].replace(';', '\\;') for r in records],
}
@ -200,7 +211,7 @@ class CloudflareProvider(BaseProvider):
data = r['data']
values.append(data)
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': values,
}
@ -208,7 +219,7 @@ class CloudflareProvider(BaseProvider):
def _data_for_CNAME(self, _type, records):
only = records[0]
return {
'ttl': only['ttl'],
'ttl': self._ttl_data(only['ttl']),
'type': _type,
'value': '{}.'.format(only['content'])
}
@ -216,6 +227,30 @@ class CloudflareProvider(BaseProvider):
_data_for_ALIAS = _data_for_CNAME
_data_for_PTR = _data_for_CNAME
def _data_for_LOC(self, _type, records):
values = []
for record in records:
r = record['data']
values.append({
'lat_degrees': int(r['lat_degrees']),
'lat_minutes': int(r['lat_minutes']),
'lat_seconds': float(r['lat_seconds']),
'lat_direction': r['lat_direction'],
'long_degrees': int(r['long_degrees']),
'long_minutes': int(r['long_minutes']),
'long_seconds': float(r['long_seconds']),
'long_direction': r['long_direction'],
'altitude': float(r['altitude']),
'size': float(r['size']),
'precision_horz': float(r['precision_horz']),
'precision_vert': float(r['precision_vert']),
})
return {
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': values
}
def _data_for_MX(self, _type, records):
values = []
for r in records:
@ -224,14 +259,14 @@ class CloudflareProvider(BaseProvider):
'exchange': '{}.'.format(r['content']),
})
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': values,
}
def _data_for_NS(self, _type, records):
return {
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': ['{}.'.format(r['content']) for r in records],
}
@ -239,15 +274,33 @@ class CloudflareProvider(BaseProvider):
def _data_for_SRV(self, _type, records):
values = []
for r in records:
target = ('{}.'.format(r['data']['target'])
if r['data']['target'] != "." else ".")
values.append({
'priority': r['data']['priority'],
'weight': r['data']['weight'],
'port': r['data']['port'],
'target': '{}.'.format(r['data']['target']),
'target': target,
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'ttl': self._ttl_data(records[0]['ttl']),
'values': values
}
def _data_for_URLFWD(self, _type, records):
values = []
for r in records:
values.append({
'path': r['path'],
'target': r['url'],
'code': r['status_code'],
'masking': 2,
'query': 0,
})
return {
'type': _type,
'ttl': 300, # ttl does not exist for this type, forcing a setting
'values': values
}
@ -270,6 +323,13 @@ class CloudflareProvider(BaseProvider):
else:
page = None
path = '/zones/{}/pagerules'.format(zone_id)
resp = self._try_request('GET', path, params={'status': 'active'})
for r in resp['result']:
# assumption, base on API guide, will only contain 1 action
if r['actions'][0]['id'] == 'forwarding_url':
records += [r]
self._zone_records[zone.name] = records
return self._zone_records[zone.name]
@ -306,10 +366,29 @@ class CloudflareProvider(BaseProvider):
exists = True
values = defaultdict(lambda: defaultdict(list))
for record in records:
name = zone.hostname_from_fqdn(record['name'])
_type = record['type']
if _type in self.SUPPORTS:
values[name][record['type']].append(record)
if 'targets' in record:
# assumption, targets will always contain 1 target
# API documentation only indicates 'url' as the only target
# if record['targets'][0]['target'] == 'url':
uri = record['targets'][0]['constraint']['value']
uri = '//' + uri if not uri.startswith('http') else uri
parsed_uri = urlsplit(uri)
name = zone.hostname_from_fqdn(parsed_uri.netloc)
path = parsed_uri.path
_type = 'URLFWD'
# assumption, actions will always contain 1 action
_values = record['actions'][0]['value']
_values['path'] = path
# no ttl set by pagerule, creating one
_values['ttl'] = 300
values[name][_type].append(_values)
# the dns_records branch
# elif 'name' in record:
else:
name = zone.hostname_from_fqdn(record['name'])
_type = record['type']
if _type in self.SUPPORTS:
values[name][record['type']].append(record)
for name, types in values.items():
for _type, records in types.items():
@ -341,6 +420,11 @@ class CloudflareProvider(BaseProvider):
existing.update({
'ttl': new['ttl']
})
elif change.new._type == 'URLFWD':
existing = deepcopy(change.existing.data)
existing.update({
'ttl': new['ttl']
})
else:
existing = change.existing.data
@ -384,6 +468,25 @@ class CloudflareProvider(BaseProvider):
_contents_for_PTR = _contents_for_CNAME
def _contents_for_LOC(self, record):
for value in record.values:
yield {
'data': {
'lat_degrees': value.lat_degrees,
'lat_minutes': value.lat_minutes,
'lat_seconds': value.lat_seconds,
'lat_direction': value.lat_direction,
'long_degrees': value.long_degrees,
'long_minutes': value.long_minutes,
'long_seconds': value.long_seconds,
'long_direction': value.long_direction,
'altitude': value.altitude,
'size': value.size,
'precision_horz': value.precision_horz,
'precision_vert': value.precision_vert,
}
}
def _contents_for_MX(self, record):
for value in record.values:
yield {
@ -405,6 +508,8 @@ class CloudflareProvider(BaseProvider):
name = subdomain
for value in record.values:
target = value.target[:-1] if value.target != "." else "."
yield {
'data': {
'service': service,
@ -413,10 +518,35 @@ class CloudflareProvider(BaseProvider):
'priority': value.priority,
'weight': value.weight,
'port': value.port,
'target': value.target[:-1],
'target': target,
}
}
def _contents_for_URLFWD(self, record):
name = record.fqdn[:-1]
for value in record.values:
yield {
'targets': [
{
'target': 'url',
'constraint': {
'operator': 'matches',
'value': name + value.path
}
}
],
'actions': [
{
'id': 'forwarding_url',
'value': {
'url': value.target,
'status_code': value.code,
}
}
],
'status': 'active',
}
def _record_is_proxied(self, record):
return (
not self.cdn and
@ -432,20 +562,25 @@ class CloudflareProvider(BaseProvider):
if _type == 'ALIAS':
_type = 'CNAME'
contents_for = getattr(self, '_contents_for_{}'.format(_type))
for content in contents_for(record):
content.update({
'name': name,
'type': _type,
'ttl': ttl,
})
if _type in _PROXIABLE_RECORD_TYPES:
if _type == 'URLFWD':
contents_for = getattr(self, '_contents_for_{}'.format(_type))
for content in contents_for(record):
yield content
else:
contents_for = getattr(self, '_contents_for_{}'.format(_type))
for content in contents_for(record):
content.update({
'proxied': self._record_is_proxied(record)
'name': name,
'type': _type,
'ttl': ttl,
})
yield content
if _type in _PROXIABLE_RECORD_TYPES:
content.update({
'proxied': self._record_is_proxied(record)
})
yield content
def _gen_key(self, data):
# Note that most CF record data has a `content` field the value of
@ -456,10 +591,11 @@ class CloudflareProvider(BaseProvider):
# new records cleanly. In general when there are multiple records for a
# name & type each will have a distinct/consistent `content` that can
# serve as a unique identifier.
# BUT... there are exceptions. MX, CAA, and SRV don't have a simple
# BUT... there are exceptions. MX, CAA, LOC and SRV don't have a simple
# content as things are currently implemented so we need to handle
# those explicitly and create unique/hashable strings for them.
_type = data['type']
# AND... for URLFWD/Redirects additional adventures are created.
_type = data.get('type', 'URLFWD')
if _type == 'MX':
return '{priority} {content}'.format(**data)
elif _type == 'CAA':
@ -468,12 +604,39 @@ class CloudflareProvider(BaseProvider):
elif _type == 'SRV':
data = data['data']
return '{port} {priority} {target} {weight}'.format(**data)
elif _type == 'LOC':
data = data['data']
loc = (
'{lat_degrees}',
'{lat_minutes}',
'{lat_seconds}',
'{lat_direction}',
'{long_degrees}',
'{long_minutes}',
'{long_seconds}',
'{long_direction}',
'{altitude}',
'{size}',
'{precision_horz}',
'{precision_vert}')
return ' '.join(loc).format(**data)
elif _type == 'URLFWD':
uri = data['targets'][0]['constraint']['value']
uri = '//' + uri if not uri.startswith('http') else uri
parsed_uri = urlsplit(uri)
return '{name} {path} {url} {status_code}' \
.format(name=parsed_uri.netloc,
path=parsed_uri.path,
**data['actions'][0]['value'])
return data['content']
def _apply_Create(self, change):
new = change.new
zone_id = self.zones[new.zone.name]
path = '/zones/{}/dns_records'.format(zone_id)
if new._type == 'URLFWD':
path = '/zones/{}/pagerules'.format(zone_id)
else:
path = '/zones/{}/dns_records'.format(zone_id)
for content in self._gen_data(new):
self._try_request('POST', path, data=content)
@ -486,14 +649,27 @@ class CloudflareProvider(BaseProvider):
existing = {}
# Find all of the existing CF records for this name & type
for record in self.zone_records(zone):
name = zone.hostname_from_fqdn(record['name'])
if 'targets' in record:
uri = record['targets'][0]['constraint']['value']
uri = '//' + uri if not uri.startswith('http') else uri
parsed_uri = urlsplit(uri)
name = zone.hostname_from_fqdn(parsed_uri.netloc)
path = parsed_uri.path
# assumption, actions will always contain 1 action
_values = record['actions'][0]['value']
_values['path'] = path
_values['ttl'] = 300
_values['type'] = 'URLFWD'
record.update(_values)
else:
name = zone.hostname_from_fqdn(record['name'])
# Use the _record_for so that we include all of standard
# conversion logic
r = self._record_for(zone, name, record['type'], [record], True)
if hostname == r.name and _type == r._type:
# Round trip the single value through a record to contents flow
# to get a consistent _gen_data result that matches what
# went in to new_contents
# Round trip the single value through a record to contents
# flow to get a consistent _gen_data result that matches
# what went in to new_contents
data = next(self._gen_data(r))
# Record the record_id and data for this existing record
@ -561,7 +737,10 @@ class CloudflareProvider(BaseProvider):
# otherwise required, just makes things deterministic
# Creates
path = '/zones/{}/dns_records'.format(zone_id)
if _type == 'URLFWD':
path = '/zones/{}/pagerules'.format(zone_id)
else:
path = '/zones/{}/dns_records'.format(zone_id)
for _, data in sorted(creates.items()):
self.log.debug('_apply_Update: creating %s', data)
self._try_request('POST', path, data=data)
@ -571,7 +750,10 @@ class CloudflareProvider(BaseProvider):
record_id = info['record_id']
data = info['data']
old_data = info['old_data']
path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
if _type == 'URLFWD':
path = '/zones/{}/pagerules/{}'.format(zone_id, record_id)
else:
path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
self.log.debug('_apply_Update: updating %s, %s -> %s',
record_id, data, old_data)
self._try_request('PUT', path, data=data)
@ -580,7 +762,10 @@ class CloudflareProvider(BaseProvider):
for _, info in sorted(deletes.items()):
record_id = info['record_id']
old_data = info['data']
path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
if _type == 'URLFWD':
path = '/zones/{}/pagerules/{}'.format(zone_id, record_id)
else:
path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
self.log.debug('_apply_Update: removing %s, %s', record_id,
old_data)
self._try_request('DELETE', path)
@ -592,11 +777,24 @@ class CloudflareProvider(BaseProvider):
existing_type = 'CNAME' if existing._type == 'ALIAS' \
else existing._type
for record in self.zone_records(existing.zone):
if existing_name == record['name'] and \
existing_type == record['type']:
path = '/zones/{}/dns_records/{}'.format(record['zone_id'],
record['id'])
self._try_request('DELETE', path)
if 'targets' in record:
uri = record['targets'][0]['constraint']['value']
uri = '//' + uri if not uri.startswith('http') else uri
parsed_uri = urlsplit(uri)
record_name = parsed_uri.netloc
record_type = 'URLFWD'
zone_id = self.zones.get(existing.zone.name, False)
if existing_name == record_name and \
existing_type == record_type:
path = '/zones/{}/pagerules/{}' \
.format(zone_id, record['id'])
self._try_request('DELETE', path)
else:
if existing_name == record['name'] and \
existing_type == record['type']:
path = '/zones/{}/dns_records/{}' \
.format(record['zone_id'], record['id'])
self._try_request('DELETE', path)
def _apply(self, plan):
desired = plan.desired
@ -616,6 +814,11 @@ class CloudflareProvider(BaseProvider):
self.zones[name] = zone_id
self._zone_records[name] = {}
# Force the operation order to be Delete() -> Create() -> Update()
# This will help avoid problems in updating a CNAME record into an
# A record and vice-versa
changes.sort(key=self._change_keyer)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)


+ 2
- 7
octodns/provider/constellix.py View File

@ -8,7 +8,6 @@ from __future__ import absolute_import, division, print_function, \
from collections import defaultdict
from requests import Session
from base64 import b64encode
from ipaddress import ip_address
from six import string_types
import hashlib
import hmac
@ -16,10 +15,11 @@ import logging
import time
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class ConstellixClientException(Exception):
class ConstellixClientException(ProviderException):
pass
@ -139,11 +139,6 @@ class ConstellixClient(object):
v['value'] = self._absolutize_value(v['value'],
zone_name)
# compress IPv6 addresses
if record['type'] == 'AAAA':
for i, v in enumerate(value):
value[i] = str(ip_address(v))
return resp
def record_create(self, zone_name, record_type, params):


+ 7
- 2
octodns/provider/digitalocean.py View File

@ -10,10 +10,11 @@ from requests import Session
import logging
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class DigitalOceanClientException(Exception):
class DigitalOceanClientException(ProviderException):
pass
@ -186,10 +187,14 @@ class DigitalOceanProvider(BaseProvider):
def _data_for_SRV(self, _type, records):
values = []
for record in records:
target = (
'{}.'.format(record['data'])
if record['data'] != "." else "."
)
values.append({
'port': record['port'],
'priority': record['priority'],
'target': '{}.'.format(record['data']),
'target': target,
'weight': record['weight']
})
return {


+ 43
- 6
octodns/provider/dnsimple.py View File

@ -10,10 +10,11 @@ from requests import Session
import logging
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class DnsimpleClientException(Exception):
class DnsimpleClientException(ProviderException):
pass
@ -51,8 +52,8 @@ class DnsimpleClient(object):
resp.raise_for_status()
return resp
def domain(self, name):
path = '/domains/{}'.format(name)
def zone(self, name):
path = '/zones/{}'.format(name)
return self._request('GET', path).json()
def domain_create(self, name):
@ -218,12 +219,23 @@ class DnsimpleProvider(BaseProvider):
try:
weight, port, target = record['content'].split(' ', 2)
except ValueError:
# see _data_for_NAPTR's continue
# their api/website will let you create invalid records, this
# essentially handles that by ignoring them for values
# purposes. That will cause updates to happen to delete them if
# they shouldn't exist or update them if they're wrong
self.log.warning(
'_data_for_SRV: unsupported %s record (%s)',
_type,
record['content']
)
continue
target = '{}.'.format(target) if target != "." else "."
values.append({
'port': port,
'priority': record['priority'],
'target': '{}.'.format(target),
'target': target,
'weight': weight
})
return {
@ -270,6 +282,10 @@ class DnsimpleProvider(BaseProvider):
for record in self.zone_records(zone):
_type = record['type']
if _type not in self.SUPPORTS:
self.log.warning(
'populate: skipping unsupported %s record',
_type
)
continue
elif _type == 'TXT' and record['content'].startswith('ALIAS for'):
# ALIAS has a "ride along" TXT record with 'ALIAS for XXXX',
@ -290,6 +306,27 @@ class DnsimpleProvider(BaseProvider):
len(zone.records) - before, exists)
return exists
def supports(self, record):
# DNSimple does not support empty/NULL SRV records
#
# Fails silently and leaves a corrupt record
#
# Skip the record and continue
if record._type == "SRV":
if 'value' in record.data:
targets = (record.data['value']['target'],)
else:
targets = [value['target'] for value in record.data['values']]
if "." in targets:
self.log.warning(
'supports: unsupported %s record with target (%s)',
record._type, targets
)
return False
return super(DnsimpleProvider, self).supports(record)
def _params_for_multiple(self, record):
for value in record.values:
yield {
@ -406,7 +443,7 @@ class DnsimpleProvider(BaseProvider):
domain_name = desired.name[:-1]
try:
self._client.domain(domain_name)
self._client.zone(domain_name)
except DnsimpleClientNotFound:
self.log.debug('_apply: no matching zone, creating domain')
self._client.domain_create(domain_name)


+ 26
- 1
octodns/provider/dnsmadeeasy.py View File

@ -13,10 +13,11 @@ import hmac
import logging
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class DnsMadeEasyClientException(Exception):
class DnsMadeEasyClientException(ProviderException):
pass
@ -284,6 +285,30 @@ class DnsMadeEasyProvider(BaseProvider):
len(zone.records) - before, exists)
return exists
def supports(self, record):
# DNS Made Easy does not support empty/NULL SRV records
#
# Attempting to sync such a record would generate the following error
#
# octodns.provider.dnsmadeeasy.DnsMadeEasyClientBadRequest:
# - Record value may not be a standalone dot.
#
# Skip the record and continue
if record._type == "SRV":
if 'value' in record.data:
targets = (record.data['value']['target'],)
else:
targets = [value['target'] for value in record.data['values']]
if "." in targets:
self.log.warning(
'supports: unsupported %s record with target (%s)',
record._type, targets
)
return False
return super(DnsMadeEasyProvider, self).supports(record)
def _params_for_multiple(self, record):
for value in record.values:
yield {


+ 6
- 6
octodns/provider/dyn.py View File

@ -604,7 +604,7 @@ class DynProvider(BaseProvider):
return record
def _is_traffic_director_dyanmic(self, td, rulesets):
def _is_traffic_director_dynamic(self, td, rulesets):
for ruleset in rulesets:
try:
pieces = ruleset.label.split(':')
@ -632,7 +632,7 @@ class DynProvider(BaseProvider):
continue
# critical to call rulesets once, each call loads them :-(
rulesets = td.rulesets
if self._is_traffic_director_dyanmic(td, rulesets):
if self._is_traffic_director_dynamic(td, rulesets):
record = \
self._populate_dynamic_traffic_director(zone, fqdn,
_type, td,
@ -705,7 +705,7 @@ class DynProvider(BaseProvider):
label)
extra.append(Update(record, record))
continue
if _monitor_doesnt_match(monitor, record.healthcheck_host,
if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port):
@ -828,13 +828,13 @@ class DynProvider(BaseProvider):
self.traffic_director_monitors[label] = \
self.traffic_director_monitors[fqdn]
del self.traffic_director_monitors[fqdn]
if _monitor_doesnt_match(monitor, record.healthcheck_host,
if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port):
self.log.info('_traffic_director_monitor: updating monitor '
'for %s', label)
monitor.update(record.healthcheck_host,
monitor.update(record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port)
@ -845,7 +845,7 @@ class DynProvider(BaseProvider):
monitor = DSFMonitor(label, protocol=record.healthcheck_protocol,
response_count=2, probe_interval=60,
retries=2, port=record.healthcheck_port,
active='Y', host=record.healthcheck_host,
active='Y', host=record.healthcheck_host(),
timeout=self.MONITOR_TIMEOUT,
header=self.MONITOR_HEADER,
path=record.healthcheck_path)


+ 3
- 2
octodns/provider/easydns.py View File

@ -12,10 +12,11 @@ import logging
import base64
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class EasyDNSClientException(Exception):
class EasyDNSClientException(ProviderException):
pass
@ -59,7 +60,7 @@ class EasyDNSClient(object):
self.base_path = self.SANDBOX if sandbox else self.LIVE
sess = Session()
sess.headers.update({'Authorization': 'Basic {}'
.format(self.auth_key)})
.format(self.auth_key.decode('utf-8'))})
sess.headers.update({'accept': 'application/json'})
self._sess = sess


+ 2
- 1
octodns/provider/edgedns.py View File

@ -12,10 +12,11 @@ from collections import defaultdict
from logging import getLogger
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class AkamaiClientNotFound(Exception):
class AkamaiClientNotFound(ProviderException):
def __init__(self, resp):
message = "404: Resource not found"


+ 379
- 0
octodns/provider/gandi.py View File

@ -0,0 +1,379 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from requests import Session
import logging
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class GandiClientException(ProviderException):
    # Base class for every error raised by GandiClient.
    pass
class GandiClientBadRequest(GandiClientException):
    # Raised on HTTP 400; the response body text becomes the message.
    def __init__(self, r):
        super(GandiClientBadRequest, self).__init__(r.text)
class GandiClientUnauthorized(GandiClientException):
    # Raised on HTTP 401; the response body text becomes the message.
    def __init__(self, r):
        super(GandiClientUnauthorized, self).__init__(r.text)
class GandiClientForbidden(GandiClientException):
    # Raised on HTTP 403; the response body text becomes the message.
    def __init__(self, r):
        super(GandiClientForbidden, self).__init__(r.text)
class GandiClientNotFound(GandiClientException):
    # Raised on HTTP 404; the response body text becomes the message.
    def __init__(self, r):
        super(GandiClientNotFound, self).__init__(r.text)
class GandiClientUnknownDomainName(GandiClientException):
    # Raised when a zone cannot be created because the domain is not
    # registered at Gandi at all; takes a plain message, not a response.
    def __init__(self, msg):
        super(GandiClientUnknownDomainName, self).__init__(msg)
class GandiClient(object):
    '''Minimal client for the Gandi LiveDNS (API v5) REST endpoints.

    HTTP statuses 400/401/403/404 are translated into the typed
    GandiClient* exceptions; any other non-2xx raises via requests'
    raise_for_status().
    '''

    def __init__(self, token):
        session = Session()
        session.headers.update({'Authorization': 'Apikey {}'.format(token)})
        self._session = session
        self.endpoint = 'https://api.gandi.net/v5'

    def _request(self, method, path, params=None, data=None):
        '''Issue a request against the API and map error statuses.

        Note: `params` defaults to None rather than a mutable `{}`
        default, which would be a single dict shared across all calls.
        '''
        url = '{}{}'.format(self.endpoint, path)
        r = self._session.request(method, url, params=params, json=data)
        if r.status_code == 400:
            raise GandiClientBadRequest(r)
        elif r.status_code == 401:
            raise GandiClientUnauthorized(r)
        elif r.status_code == 403:
            raise GandiClientForbidden(r)
        elif r.status_code == 404:
            raise GandiClientNotFound(r)
        r.raise_for_status()
        return r

    def zone(self, zone_name):
        # Zone details; raises GandiClientNotFound for unknown zones.
        return self._request('GET', '/livedns/domains/{}'
                             .format(zone_name)).json()

    def zone_create(self, zone_name):
        # Create an empty LiveDNS zone for an already-registered domain.
        return self._request('POST', '/livedns/domains', data={
            'fqdn': zone_name,
            'zone': {}
        }).json()

    def zone_records(self, zone_name):
        '''Return all rrsets of a zone, normalized for octoDNS.

        '@' rrset names become '' and relative targets of name-valued
        types are made absolute by appending the zone name.
        '''
        records = self._request('GET', '/livedns/domains/{}/records'
                                .format(zone_name)).json()
        for record in records:
            if record['rrset_name'] == '@':
                record['rrset_name'] = ''
            # Change relative targets to absolute ones.
            if record['rrset_type'] in ['ALIAS', 'CNAME', 'DNAME', 'MX',
                                        'NS', 'SRV']:
                for i, value in enumerate(record['rrset_values']):
                    if not value.endswith('.'):
                        record['rrset_values'][i] = '{}.{}.'.format(
                            value, zone_name)
        return records

    def record_create(self, zone_name, data):
        self._request('POST', '/livedns/domains/{}/records'.format(zone_name),
                      data=data)

    def record_delete(self, zone_name, record_name, record_type):
        self._request('DELETE', '/livedns/domains/{}/records/{}/{}'
                      .format(zone_name, record_name, record_type))
class GandiProvider(BaseProvider):
    '''
    Gandi provider using API v5.

    gandi:
        class: octodns.provider.gandi.GandiProvider
        # Your API key (required)
        token: XXXXXXXXXXXX
    '''
    SUPPORTS_GEO = False
    SUPPORTS_DYNAMIC = False
    SUPPORTS = set((['A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'DNAME',
                     'MX', 'NS', 'PTR', 'SPF', 'SRV', 'SSHFP', 'TXT']))

    def __init__(self, id, token, *args, **kwargs):
        self.log = logging.getLogger('GandiProvider[{}]'.format(id))
        self.log.debug('__init__: id=%s, token=***', id)
        super(GandiProvider, self).__init__(id, *args, **kwargs)
        self._client = GandiClient(token)
        # Lazy cache: zone name -> raw rrset list from the API.
        self._zone_records = {}

    def _data_for_multiple(self, _type, records):
        # Multi-value rrset -> octoDNS data; TXT values need ';' escaped.
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'values': [v.replace(';', '\\;') for v in
                       records[0]['rrset_values']] if _type == 'TXT' else
            records[0]['rrset_values']
        }

    _data_for_A = _data_for_multiple
    _data_for_AAAA = _data_for_multiple
    _data_for_TXT = _data_for_multiple
    _data_for_SPF = _data_for_multiple
    _data_for_NS = _data_for_multiple

    def _data_for_CAA(self, _type, records):
        # Each value is '<flags> <tag> "<value>"'.
        values = []
        for record in records[0]['rrset_values']:
            flags, tag, value = record.split(' ')
            values.append({
                'flags': flags,
                'tag': tag,
                # Remove quotes around value.
                'value': value[1:-1],
            })
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'values': values
        }

    def _data_for_single(self, _type, records):
        # Single-value types only ever use the first rrset value.
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'value': records[0]['rrset_values'][0]
        }

    _data_for_ALIAS = _data_for_single
    _data_for_CNAME = _data_for_single
    _data_for_DNAME = _data_for_single
    _data_for_PTR = _data_for_single

    def _data_for_MX(self, _type, records):
        # Each value is '<priority> <server>'.
        values = []
        for record in records[0]['rrset_values']:
            priority, server = record.split(' ')
            values.append({
                'preference': priority,
                'exchange': server
            })
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'values': values
        }

    def _data_for_SRV(self, _type, records):
        # Each value is '<priority> <weight> <port> <target>'.
        values = []
        for record in records[0]['rrset_values']:
            priority, weight, port, target = record.split(' ', 3)
            values.append({
                'priority': priority,
                'weight': weight,
                'port': port,
                'target': target
            })
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'values': values
        }

    def _data_for_SSHFP(self, _type, records):
        # Each value is '<algorithm> <fingerprint_type> <fingerprint>'.
        values = []
        for record in records[0]['rrset_values']:
            algorithm, fingerprint_type, fingerprint = record.split(' ', 2)
            values.append({
                'algorithm': algorithm,
                'fingerprint': fingerprint,
                'fingerprint_type': fingerprint_type
            })
        return {
            'ttl': records[0]['rrset_ttl'],
            'type': _type,
            'values': values
        }

    def zone_records(self, zone):
        # Fetch (and cache) the zone's rrsets; an unknown zone yields [].
        if zone.name not in self._zone_records:
            try:
                self._zone_records[zone.name] = \
                    self._client.zone_records(zone.name[:-1])
            except GandiClientNotFound:
                return []
        return self._zone_records[zone.name]

    def populate(self, zone, target=False, lenient=False):
        '''Load supported records from Gandi into `zone`.

        Returns True when the zone was found upstream (cache hit after
        fetch), False otherwise.
        '''
        self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
                       target, lenient)
        values = defaultdict(lambda: defaultdict(list))
        for record in self.zone_records(zone):
            _type = record['rrset_type']
            if _type not in self.SUPPORTS:
                continue
            values[record['rrset_name']][record['rrset_type']].append(record)
        before = len(zone.records)
        for name, types in values.items():
            for _type, records in types.items():
                data_for = getattr(self, '_data_for_{}'.format(_type))
                record = Record.new(zone, name, data_for(_type, records),
                                    source=self, lenient=lenient)
                zone.add_record(record, lenient=lenient)
        exists = zone.name in self._zone_records
        self.log.info('populate: found %s records, exists=%s',
                      len(zone.records) - before, exists)
        return exists

    def _record_name(self, name):
        # Gandi uses '@' for the zone apex where octoDNS uses ''.
        return name if name else '@'

    def _params_for_multiple(self, record):
        # Inverse of _data_for_multiple; unescape TXT ';' on the way out.
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': [v.replace('\\;', ';') for v in
                             record.values] if record._type == 'TXT'
            else record.values
        }

    _params_for_A = _params_for_multiple
    _params_for_AAAA = _params_for_multiple
    _params_for_NS = _params_for_multiple
    _params_for_TXT = _params_for_multiple
    _params_for_SPF = _params_for_multiple

    def _params_for_CAA(self, record):
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': ['{} {} "{}"'.format(v.flags, v.tag, v.value)
                             for v in record.values]
        }

    def _params_for_single(self, record):
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': [record.value]
        }

    _params_for_ALIAS = _params_for_single
    _params_for_CNAME = _params_for_single
    _params_for_DNAME = _params_for_single
    _params_for_PTR = _params_for_single

    def _params_for_MX(self, record):
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': ['{} {}'.format(v.preference, v.exchange)
                             for v in record.values]
        }

    def _params_for_SRV(self, record):
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': ['{} {} {} {}'.format(v.priority, v.weight,
                                                  v.port,
                                                  v.target)
                             for v in record.values]
        }

    def _params_for_SSHFP(self, record):
        return {
            'rrset_name': self._record_name(record.name),
            'rrset_ttl': record.ttl,
            'rrset_type': record._type,
            'rrset_values': ['{} {} {}'.format(v.algorithm,
                                               v.fingerprint_type,
                                               v.fingerprint)
                             for v in record.values]
        }

    def _apply_create(self, change):
        new = change.new
        data = getattr(self, '_params_for_{}'.format(new._type))(new)
        self._client.record_create(new.zone.name[:-1], data)

    def _apply_update(self, change):
        # The API has no in-place update for a whole rrset; recreate it.
        self._apply_delete(change)
        self._apply_create(change)

    def _apply_delete(self, change):
        existing = change.existing
        zone = existing.zone
        self._client.record_delete(zone.name[:-1],
                                   self._record_name(existing.name),
                                   existing._type)

    def _apply(self, plan):
        '''Apply a plan's changes, creating the zone first if needed.'''
        desired = plan.desired
        changes = plan.changes
        zone = desired.name[:-1]
        self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                       len(changes))
        try:
            self._client.zone(zone)
        except GandiClientNotFound:
            self.log.info('_apply: no existing zone, trying to create it')
            try:
                self._client.zone_create(zone)
                self.log.info('_apply: zone has been successfully created')
            except GandiClientNotFound:
                # We suppress existing exception before raising
                # GandiClientUnknownDomainName.
                e = GandiClientUnknownDomainName('This domain is not '
                                                 'registered at Gandi. '
                                                 'Please register or '
                                                 'transfer it here '
                                                 'to be able to manage its '
                                                 'DNS zone.')
                e.__cause__ = None
                raise e
        # Force records deletion to be done before creation in order to avoid
        # "CNAME record must be the only record" error when an existing CNAME
        # record is replaced by an A/AAAA record.
        changes.reverse()
        for change in changes:
            class_name = change.__class__.__name__
            getattr(self, '_apply_{}'.format(class_name.lower()))(change)
        # Clear out the cache if any
        self._zone_records.pop(desired.name, None)

+ 624
- 0
octodns/provider/gcore.py View File

@ -0,0 +1,624 @@
#
#
#
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from collections import defaultdict
from requests import Session
import http
import logging
import urllib.parse
from ..record import GeoCodes
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class GCoreClientException(ProviderException):
    # Base error for GCore API failures; subclasses pass the raw response
    # up to here, where its body text becomes the exception message.
    def __init__(self, r):
        super(GCoreClientException, self).__init__(r.text)
class GCoreClientBadRequest(GCoreClientException):
    # Raised on HTTP 400.
    def __init__(self, r):
        super(GCoreClientBadRequest, self).__init__(r)
class GCoreClientNotFound(GCoreClientException):
    # Raised on HTTP 404.
    def __init__(self, r):
        super(GCoreClientNotFound, self).__init__(r)
class GCoreClient(object):
    '''HTTP client for the G-Core Labs DNS API.

    Authenticates either with a permanent token (token + token_type) or by
    exchanging login/password for a JWT, then raises typed exceptions for
    the common HTTP error statuses.
    '''

    ROOT_ZONES = "zones"

    def __init__(
        self,
        log,
        api_url,
        auth_url,
        token=None,
        token_type=None,
        login=None,
        password=None,
    ):
        self.log = log
        self._session = Session()
        self._api_url = api_url
        # Work out the Authorization header value, then install it once.
        if token is not None and token_type is not None:
            auth_header = "{} {}".format(token_type, token)
        elif login is not None and password is not None:
            jwt = self._auth(auth_url, login, password)
            auth_header = "Bearer {}".format(jwt)
        else:
            raise ValueError("either token or login & password must be set")
        self._session.headers.update({"Authorization": auth_header})

    def _auth(self, url, login, password):
        # Deliberately bypasses _request(): a 400 for invalid credentials
        # would otherwise be logged, which we don't want.
        resp = self._session.request(
            "POST",
            self._build_url(url, "auth", "jwt", "login"),
            json={"username": login, "password": password},
        )
        resp.raise_for_status()
        return resp.json()["access"]

    def _request(self, method, url, params=None, data=None):
        # Central request funnel: logs + raises typed errors for 400/404/500,
        # falls back to raise_for_status() for anything else non-2xx.
        resp = self._session.request(
            method, url, params=params, json=data, timeout=30.0
        )
        status = resp.status_code
        if status == http.HTTPStatus.BAD_REQUEST:
            self.log.error(
                "bad request %r has been sent to %r: %s", data, url, resp.text
            )
            raise GCoreClientBadRequest(resp)
        if status == http.HTTPStatus.NOT_FOUND:
            self.log.error("resource %r not found: %s", url, resp.text)
            raise GCoreClientNotFound(resp)
        if status == http.HTTPStatus.INTERNAL_SERVER_ERROR:
            self.log.error("server error no %r to %r: %s", data, url,
                           resp.text)
            raise GCoreClientException(resp)
        resp.raise_for_status()
        return resp

    def zone(self, zone_name):
        url = self._build_url(self._api_url, self.ROOT_ZONES, zone_name)
        return self._request("GET", url).json()

    def zone_create(self, zone_name):
        url = self._build_url(self._api_url, self.ROOT_ZONES)
        return self._request("POST", url, data={"name": zone_name}).json()

    def zone_records(self, zone_name):
        # 'all=true' asks the API to return every rrset in one response.
        url = self._build_url(
            self._api_url, self.ROOT_ZONES, zone_name, "rrsets"
        )
        rrsets = self._request("GET", url, params={"all": "true"}).json()
        return rrsets["rrsets"]

    def record_create(self, zone_name, rrset_name, type_, data):
        self._request(
            "POST", self._rrset_url(zone_name, rrset_name, type_), data=data
        )

    def record_update(self, zone_name, rrset_name, type_, data):
        self._request(
            "PUT", self._rrset_url(zone_name, rrset_name, type_), data=data
        )

    def record_delete(self, zone_name, rrset_name, type_):
        self._request("DELETE", self._rrset_url(zone_name, rrset_name, type_))

    def _rrset_url(self, zone_name, rrset_name, type_):
        return self._build_url(
            self._api_url, self.ROOT_ZONES, zone_name, rrset_name, type_
        )

    @staticmethod
    def _build_url(base, *items):
        # Join path segments, tolerating stray slashes on either side.
        url = base
        for item in items:
            url = urllib.parse.urljoin(url.strip("/") + "/", item)
        return url
class GCoreProvider(BaseProvider):
    """
    GCore provider using API v2.

    gcore:
        class: octodns.provider.gcore.GCoreProvider
        # Your API key
        token: XXXXXXXXXXXX
        # token_type: APIKey
        # or login + password
        login: XXXXXXXXXXXX
        password: XXXXXXXXXXXX
        # auth_url: https://api.gcdn.co
        # url: https://dnsapi.gcorelabs.com/v2
        # records_per_response: 1
    """
    SUPPORTS_GEO = False
    SUPPORTS_DYNAMIC = True
    SUPPORTS = set(("A", "AAAA", "NS", "MX", "TXT", "SRV", "CNAME", "PTR"))
    def __init__(self, id, *args, **kwargs):
        # Pop provider-specific options before delegating the remaining
        # kwargs to BaseProvider.
        token = kwargs.pop("token", None)
        token_type = kwargs.pop("token_type", "APIKey")
        login = kwargs.pop("login", None)
        password = kwargs.pop("password", None)
        api_url = kwargs.pop("url", "https://dnsapi.gcorelabs.com/v2")
        auth_url = kwargs.pop("auth_url", "https://api.gcdn.co")
        # Limit used for the "default" and "first_n" filters on dynamic
        # records (how many answers the API serves per query).
        self.records_per_response = kwargs.pop("records_per_response", 1)
        self.log = logging.getLogger("GCoreProvider[{}]".format(id))
        self.log.debug("__init__: id=%s", id)
        super(GCoreProvider, self).__init__(id, *args, **kwargs)
        self._client = GCoreClient(
            self.log,
            api_url,
            auth_url,
            token=token,
            token_type=token_type,
            login=login,
            password=password,
        )
    def _add_dot_if_need(self, value):
        # Ensure a trailing dot so values are fully qualified.
        return "{}.".format(value) if not value.endswith(".") else value
    def _build_pools(self, record, default_pool_name, value_transform_fn):
        """Group a GCore rrset's resource records into dynamic pools.

        Returns (pools, geo_sets, defaults):
        * pools: pool name -> {"values": [...]}
        * geo_sets: frozenset of geo codes -> name of the pool serving them
        * defaults: plain values used as the record's default answers
        """
        defaults = []
        geo_sets, pool_idx = dict(), 0
        pools = defaultdict(lambda: {"values": []})
        for rr in record["resource_records"]:
            meta = rr.get("meta", {}) or {}
            value = {"value": value_transform_fn(rr["content"][0])}
            countries = meta.get("countries", []) or []
            continents = meta.get("continents", []) or []
            if meta.get("default", False):
                pools[default_pool_name]["values"].append(value)
                defaults.append(value["value"])
                continue
            # default is false or missing and no countries or continents
            elif len(continents) == 0 and len(countries) == 0:
                defaults.append(value["value"])
                continue
            # RR with the same set of countries and continents are
            # combined in single pool
            geo_set = frozenset(
                [GeoCodes.country_to_code(cc.upper()) for cc in countries]
            ) | frozenset(cc.upper() for cc in continents)
            if geo_set not in geo_sets:
                geo_sets[geo_set] = "pool-{}".format(pool_idx)
                pool_idx += 1
            pools[geo_sets[geo_set]]["values"].append(value)
        return pools, geo_sets, defaults
    def _build_rules(self, pools, geo_sets):
        """Build one dynamic-record rule per pool, sorted by pool name."""
        rules = []
        for name, _ in pools.items():
            rule = {"pool": name}
            # Find the geo codes this pool serves, if any.
            geo_set = next(
                (
                    geo_set
                    for geo_set, pool_name in geo_sets.items()
                    if pool_name == name
                ),
                {},
            )
            if len(geo_set) > 0:
                rule["geos"] = list(geo_set)
            rules.append(rule)
        return sorted(rules, key=lambda x: x["pool"])
    def _data_for_dynamic(self, record, value_transform_fn=lambda x: x):
        """Convert a filtered (dynamic) GCore rrset into
        (pools, rules, defaults)."""
        default_pool = "other"
        pools, geo_sets, defaults = self._build_pools(
            record, default_pool, value_transform_fn
        )
        if len(pools) == 0:
            raise RuntimeError(
                "filter is enabled, but no pools where built for {}".format(
                    record
                )
            )
        # defaults can't be empty, so use first pool values
        if len(defaults) == 0:
            defaults = [
                value_transform_fn(v["value"])
                for v in next(iter(pools.values()))["values"]
            ]
        # if at least one default RR was found then setup fallback for
        # other pools to default
        if default_pool in pools:
            for pool_name, pool in pools.items():
                if pool_name == default_pool:
                    continue
                pool["fallback"] = default_pool
        rules = self._build_rules(pools, geo_sets)
        return pools, rules, defaults
    def _data_for_single(self, _type, record):
        # Single-value types (CNAME/PTR) use only the first RR's content.
        return {
            "ttl": record["ttl"],
            "type": _type,
            "value": self._add_dot_if_need(
                record["resource_records"][0]["content"][0]
            ),
        }
    _data_for_PTR = _data_for_single
    def _data_for_CNAME(self, _type, record):
        # Plain CNAME unless the rrset carries filters (dynamic record).
        if record.get("filters") is None:
            return self._data_for_single(_type, record)
        pools, rules, defaults = self._data_for_dynamic(
            record, self._add_dot_if_need
        )
        return {
            "ttl": record["ttl"],
            "type": _type,
            "dynamic": {"pools": pools, "rules": rules},
            "value": self._add_dot_if_need(defaults[0]),
        }
    def _data_for_multiple(self, _type, record):
        extra = dict()
        if record.get("filters") is not None:
            # Dynamic record: pools/rules plus defaults as plain values.
            pools, rules, defaults = self._data_for_dynamic(record)
            extra = {
                "dynamic": {"pools": pools, "rules": rules},
                "values": defaults,
            }
        else:
            extra = {
                "values": [
                    rr_value
                    for resource_record in record["resource_records"]
                    for rr_value in resource_record["content"]
                ]
            }
        return {
            "ttl": record["ttl"],
            "type": _type,
            **extra,
        }
    _data_for_A = _data_for_multiple
    _data_for_AAAA = _data_for_multiple
    def _data_for_TXT(self, _type, record):
        # octoDNS expects semicolons in TXT values to be escaped.
        return {
            "ttl": record["ttl"],
            "type": _type,
            "values": [
                rr_value.replace(";", "\\;")
                for resource_record in record["resource_records"]
                for rr_value in resource_record["content"]
            ],
        }
    def _data_for_MX(self, _type, record):
        # Each MX RR's content is a (preference, exchange) pair.
        return {
            "ttl": record["ttl"],
            "type": _type,
            "values": [
                dict(
                    preference=preference,
                    exchange=self._add_dot_if_need(exchange),
                )
                for preference, exchange in map(
                    lambda x: x["content"], record["resource_records"]
                )
            ],
        }
    def _data_for_NS(self, _type, record):
        return {
            "ttl": record["ttl"],
            "type": _type,
            "values": [
                self._add_dot_if_need(rr_value)
                for resource_record in record["resource_records"]
                for rr_value in resource_record["content"]
            ],
        }
    def _data_for_SRV(self, _type, record):
        # Each SRV RR's content is (priority, weight, port, target).
        return {
            "ttl": record["ttl"],
            "type": _type,
            "values": [
                dict(
                    priority=priority,
                    weight=weight,
                    port=port,
                    target=self._add_dot_if_need(target),
                )
                for priority, weight, port, target in map(
                    lambda x: x["content"], record["resource_records"]
                )
            ],
        }
    def zone_records(self, zone):
        """Return (records, exists); exists is False when the zone 404s."""
        try:
            return self._client.zone_records(zone.name[:-1]), True
        except GCoreClientNotFound:
            return [], False
    def populate(self, zone, target=False, lenient=False):
        """Populate zone with this provider's records.

        Returns True if the zone exists on the GCore side.
        """
        self.log.debug(
            "populate: name=%s, target=%s, lenient=%s",
            zone.name,
            target,
            lenient,
        )
        values = defaultdict(defaultdict)
        records, exists = self.zone_records(zone)
        for record in records:
            _type = record["type"].upper()
            if _type not in self.SUPPORTS:
                continue
            # Skip filtered rrsets we didn't write (see _should_ignore).
            if self._should_ignore(record):
                continue
            rr_name = zone.hostname_from_fqdn(record["name"])
            values[rr_name][_type] = record
        before = len(zone.records)
        for name, types in values.items():
            for _type, record in types.items():
                data_for = getattr(self, "_data_for_{}".format(_type))
                record = Record.new(
                    zone,
                    name,
                    data_for(_type, record),
                    source=self,
                    lenient=lenient,
                )
                zone.add_record(record, lenient=lenient)
        self.log.info(
            "populate: found %s records, exists=%s",
            len(zone.records) - before,
            exists,
        )
        return exists
    def _should_ignore(self, record):
        """Return True for filtered rrsets whose filter chain isn't the
        exact geodns/default/first_n shape this provider writes."""
        name = record.get("name", "name-not-defined")
        if record.get("filters") is None:
            return False
        want_filters = 3
        filters = record.get("filters", [])
        if len(filters) != want_filters:
            self.log.info(
                "ignore %s has filters and their count is not %d",
                name,
                want_filters,
            )
            return True
        types = [v.get("type") for v in filters]
        for i, want_type in enumerate(["geodns", "default", "first_n"]):
            if types[i] != want_type:
                self.log.info(
                    "ignore %s, filters.%d.type is %s, want %s",
                    name,
                    i,
                    types[i],
                    want_type,
                )
                return True
        # the "default" and "first_n" filter limits must agree
        limits = [filters[i].get("limit", 1) for i in [1, 2]]
        if limits[0] != limits[1]:
            self.log.info(
                "ignore %s, filters.1.limit (%d) != filters.2.limit (%d)",
                name,
                limits[0],
                limits[1],
            )
            return True
        return False
    # NOTE: "dymanic" is a historical typo in the method name, kept as-is
    # because the _params_for_* methods below reference it.
    def _params_for_dymanic(self, record):
        """Build GCore resource_records (with geo meta) from a dynamic
        record's rules, pools and default values."""
        records = []
        default_pool_found = False
        default_values = set(
            record.values if hasattr(record, "values") else [record.value]
        )
        for rule in record.dynamic.rules:
            meta = dict()
            # build meta tags if geos information present
            if len(rule.data.get("geos", [])) > 0:
                for geo_code in rule.data["geos"]:
                    geo = GeoCodes.parse(geo_code)
                    country = geo["country_code"]
                    continent = geo["continent_code"]
                    if country is not None:
                        meta.setdefault("countries", []).append(country)
                    else:
                        meta.setdefault("continents", []).append(continent)
            else:
                meta["default"] = True
            pool_values = set()
            pool_name = rule.data["pool"]
            for value in record.dynamic.pools[pool_name].data["values"]:
                v = value["value"]
                records.append({"content": [v], "meta": meta})
                pool_values.add(v)
            default_pool_found |= default_values == pool_values
        # if the default values don't match any pool's values, then just
        # add those values with no meta at all
        if not default_pool_found:
            for value in default_values:
                records.append({"content": [value]})
        return records
    def _params_for_single(self, record):
        return {
            "ttl": record.ttl,
            "resource_records": [{"content": [record.value]}],
        }
    _params_for_PTR = _params_for_single
    def _params_for_CNAME(self, record):
        if not record.dynamic:
            return self._params_for_single(record)
        # The filter chain below must match what _should_ignore accepts.
        return {
            "ttl": record.ttl,
            "resource_records": self._params_for_dymanic(record),
            "filters": [
                {"type": "geodns"},
                {
                    "type": "default",
                    "limit": self.records_per_response,
                    "strict": False,
                },
                {"type": "first_n", "limit": self.records_per_response},
            ],
        }
    def _params_for_multiple(self, record):
        extra = dict()
        if record.dynamic:
            extra["resource_records"] = self._params_for_dymanic(record)
            # The filter chain below must match what _should_ignore accepts.
            extra["filters"] = [
                {"type": "geodns"},
                {
                    "type": "default",
                    "limit": self.records_per_response,
                    "strict": False,
                },
                {"type": "first_n", "limit": self.records_per_response},
            ]
        else:
            extra["resource_records"] = [
                {"content": [value]} for value in record.values
            ]
        return {
            "ttl": record.ttl,
            **extra,
        }
    _params_for_A = _params_for_multiple
    _params_for_AAAA = _params_for_multiple
    def _params_for_NS(self, record):
        return {
            "ttl": record.ttl,
            "resource_records": [
                {"content": [value]} for value in record.values
            ],
        }
    def _params_for_TXT(self, record):
        # Unescape semicolons; the API stores them raw.
        return {
            "ttl": record.ttl,
            "resource_records": [
                {"content": [value.replace("\\;", ";")]}
                for value in record.values
            ],
        }
    def _params_for_MX(self, record):
        return {
            "ttl": record.ttl,
            "resource_records": [
                {"content": [rec.preference, rec.exchange]}
                for rec in record.values
            ],
        }
    def _params_for_SRV(self, record):
        return {
            "ttl": record.ttl,
            "resource_records": [
                {"content": [rec.priority, rec.weight, rec.port, rec.target]}
                for rec in record.values
            ],
        }
    def _apply_create(self, change):
        self.log.info("creating: %s", change)
        new = change.new
        data = getattr(self, "_params_for_{}".format(new._type))(new)
        self._client.record_create(
            new.zone.name[:-1], new.fqdn, new._type, data
        )
    def _apply_update(self, change):
        self.log.info("updating: %s", change)
        new = change.new
        data = getattr(self, "_params_for_{}".format(new._type))(new)
        self._client.record_update(
            new.zone.name[:-1], new.fqdn, new._type, data
        )
    def _apply_delete(self, change):
        self.log.info("deleting: %s", change)
        existing = change.existing
        self._client.record_delete(
            existing.zone.name[:-1], existing.fqdn, existing._type
        )
    def _apply(self, plan):
        """Apply plan's changes, creating the zone first if necessary."""
        desired = plan.desired
        changes = plan.changes
        zone = desired.name[:-1]
        self.log.debug(
            "_apply: zone=%s, len(changes)=%d", desired.name, len(changes)
        )
        try:
            self._client.zone(zone)
        except GCoreClientNotFound:
            self.log.info("_apply: no existing zone, trying to create it")
            self._client.zone_create(zone)
            self.log.info("_apply: zone has been successfully created")
        # NOTE(review): changes are applied in reverse plan order --
        # presumably so creates land before deletes; confirm against the
        # plan's change ordering.
        changes.reverse()
        for change in changes:
            class_name = change.__class__.__name__
            getattr(self, "_apply_{}".format(class_name.lower()))(change)

+ 340
- 0
octodns/provider/hetzner.py View File

@ -0,0 +1,340 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from requests import Session
import logging
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class HetznerClientException(ProviderException):
    # Base class for all Hetzner DNS API client errors.
    pass
class HetznerClientNotFound(HetznerClientException):
    # Raised when the API responds 404, e.g. a zone/record doesn't exist.
    def __init__(self):
        super(HetznerClientNotFound, self).__init__('Not Found')
class HetznerClientUnauthorized(HetznerClientException):
    # Raised when the API responds 401, i.e. the token is invalid.
    def __init__(self):
        super(HetznerClientUnauthorized, self).__init__('Unauthorized')
class HetznerClient(object):
    """Minimal wrapper around the Hetzner DNS API v1."""
    BASE_URL = 'https://dns.hetzner.com/api/v1'
    def __init__(self, token):
        session = Session()
        # All authentication happens via this header on every request.
        session.headers.update({'Auth-API-Token': token})
        self._session = session
    def _do(self, method, path, params=None, data=None):
        # Perform a request, translating 401/404 into typed exceptions and
        # raising for any other non-2xx status.
        url = '{}{}'.format(self.BASE_URL, path)
        response = self._session.request(method, url, params=params, json=data)
        if response.status_code == 401:
            raise HetznerClientUnauthorized()
        if response.status_code == 404:
            raise HetznerClientNotFound()
        response.raise_for_status()
        return response
    def _do_json(self, method, path, params=None, data=None):
        # Same as _do but decode the JSON body.
        return self._do(method, path, params, data).json()
    def zone_get(self, name):
        # Look up a zone by name; takes the first entry of the filtered
        # listing. Raises IndexError if the API returns an empty list --
        # NOTE(review): presumably it 404s instead for unknown names.
        params = {'name': name}
        return self._do_json('GET', '/zones', params)['zones'][0]
    def zone_create(self, name, ttl=None):
        data = {'name': name, 'ttl': ttl}
        return self._do_json('POST', '/zones', data=data)['zone']
    def zone_records_get(self, zone_id):
        params = {'zone_id': zone_id}
        records = self._do_json('GET', '/records', params=params)['records']
        # Hetzner uses '@' for the zone apex; octoDNS uses ''.
        for record in records:
            if record['name'] == '@':
                record['name'] = ''
        return records
    def zone_record_create(self, zone_id, name, _type, value, ttl=None):
        # Empty name means apex; translate back to Hetzner's '@'.
        data = {'name': name or '@', 'ttl': ttl, 'type': _type, 'value': value,
                'zone_id': zone_id}
        self._do('POST', '/records', data=data)
    def zone_record_delete(self, zone_id, record_id):
        # zone_id is unused by the endpoint; kept for interface symmetry.
        self._do('DELETE', '/records/{}'.format(record_id))
class HetznerProvider(BaseProvider):
    '''
    Hetzner DNS provider using API v1

    hetzner:
        class: octodns.provider.hetzner.HetznerProvider
        # Your Hetzner API token (required)
        token: foo
    '''
    SUPPORTS_GEO = False
    SUPPORTS_DYNAMIC = False
    SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'))
    def __init__(self, id, token, *args, **kwargs):
        self.log = logging.getLogger('HetznerProvider[{}]'.format(id))
        self.log.debug('__init__: id=%s, token=***', id)
        super(HetznerProvider, self).__init__(id, *args, **kwargs)
        self._client = HetznerClient(token)
        # Caches: raw records by zone name, zone metadata by zone id and
        # zone ids by zone name.
        self._zone_records = {}
        self._zone_metadata = {}
        self._zone_name_to_id = {}
    def _append_dot(self, value):
        # Make value fully qualified; '@' (apex) is left untouched.
        if value == '@' or value[-1] == '.':
            return value
        return '{}.'.format(value)
    def zone_metadata(self, zone_id=None, zone_name=None):
        """Fetch (and cache) a zone's metadata by id or name.

        Raises HetznerClientNotFound (via zone_get) for unknown names;
        a zone_id lookup assumes the metadata was cached earlier.
        """
        if zone_name is not None:
            if zone_name in self._zone_name_to_id:
                zone_id = self._zone_name_to_id[zone_name]
            else:
                # octoDNS zone names carry a trailing dot, the API's don't
                zone = self._client.zone_get(name=zone_name[:-1])
                zone_id = zone['id']
                self._zone_name_to_id[zone_name] = zone_id
                self._zone_metadata[zone_id] = zone
        return self._zone_metadata[zone_id]
    def _record_ttl(self, record):
        # Records without an explicit ttl inherit the zone's default ttl.
        default_ttl = self.zone_metadata(zone_id=record['zone_id'])['ttl']
        return record['ttl'] if 'ttl' in record else default_ttl
    def _data_for_multiple(self, _type, records):
        # octoDNS expects semicolons in values to be escaped.
        values = [record['value'].replace(';', '\\;') for record in records]
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }
    _data_for_A = _data_for_multiple
    _data_for_AAAA = _data_for_multiple
    def _data_for_CAA(self, _type, records):
        # assumes values look like: <flags> <tag> "<value>" -- TODO confirm
        # against the API's actual CAA formatting
        values = []
        for record in records:
            value_without_spaces = record['value'].replace(' ', '')
            flags = value_without_spaces[0]
            tag = value_without_spaces[1:].split('"')[0]
            value = record['value'].split('"')[1]
            values.append({
                'flags': int(flags),
                'tag': tag,
                'value': value,
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }
    def _data_for_CNAME(self, _type, records):
        # CNAME is single-valued; only the first record is used.
        record = records[0]
        return {
            'ttl': self._record_ttl(record),
            'type': _type,
            'value': self._append_dot(record['value'])
        }
    def _data_for_MX(self, _type, records):
        # values look like: <preference> <exchange>
        values = []
        for record in records:
            value_stripped_split = record['value'].strip().split(' ')
            preference = value_stripped_split[0]
            exchange = value_stripped_split[-1]
            values.append({
                'preference': int(preference),
                'exchange': self._append_dot(exchange)
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }
    def _data_for_NS(self, _type, records):
        values = []
        for record in records:
            values.append(self._append_dot(record['value']))
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values,
        }
    def _data_for_SRV(self, _type, records):
        # values look like: <priority> <weight> <port> <target>; fields are
        # sliced from both ends to tolerate repeated separators
        values = []
        for record in records:
            value_stripped = record['value'].strip()
            priority = value_stripped.split(' ')[0]
            weight = value_stripped[len(priority):].strip().split(' ')[0]
            target = value_stripped.split(' ')[-1]
            port = value_stripped[:-len(target)].strip().split(' ')[-1]
            values.append({
                'port': int(port),
                'priority': int(priority),
                'target': self._append_dot(target),
                'weight': int(weight)
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }
    _data_for_TXT = _data_for_multiple
    def zone_records(self, zone):
        """Return (and cache) the zone's raw records; [] if it doesn't
        exist remotely. A missing zone is not cached, so existence can be
        derived from the cache (see populate)."""
        if zone.name not in self._zone_records:
            try:
                zone_id = self.zone_metadata(zone_name=zone.name)['id']
                self._zone_records[zone.name] = \
                    self._client.zone_records_get(zone_id)
            except HetznerClientNotFound:
                return []
        return self._zone_records[zone.name]
    def populate(self, zone, target=False, lenient=False):
        """Populate zone; returns True if the zone exists remotely."""
        self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
                       target, lenient)
        # Group raw records by (name, type) so multi-value rrsets are
        # handed to _data_for_* together.
        values = defaultdict(lambda: defaultdict(list))
        for record in self.zone_records(zone):
            _type = record['type']
            if _type not in self.SUPPORTS:
                self.log.warning('populate: skipping unsupported %s record',
                                 _type)
                continue
            values[record['name']][record['type']].append(record)
        before = len(zone.records)
        for name, types in values.items():
            for _type, records in types.items():
                data_for = getattr(self, '_data_for_{}'.format(_type))
                record = Record.new(zone, name, data_for(_type, records),
                                    source=self, lenient=lenient)
                zone.add_record(record, lenient=lenient)
        # zone_records only caches when the zone was actually found
        exists = zone.name in self._zone_records
        self.log.info('populate: found %s records, exists=%s',
                      len(zone.records) - before, exists)
        return exists
    def _params_for_multiple(self, record):
        # One API payload per value; unescape octoDNS's semicolons.
        for value in record.values:
            yield {
                'value': value.replace('\\;', ';'),
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }
    _params_for_A = _params_for_multiple
    _params_for_AAAA = _params_for_multiple
    def _params_for_CAA(self, record):
        for value in record.values:
            data = '{} {} "{}"'.format(value.flags, value.tag, value.value)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }
    def _params_for_single(self, record):
        yield {
            'value': record.value,
            'name': record.name,
            'ttl': record.ttl,
            'type': record._type
        }
    _params_for_CNAME = _params_for_single
    def _params_for_MX(self, record):
        for value in record.values:
            data = '{} {}'.format(value.preference, value.exchange)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }
    _params_for_NS = _params_for_multiple
    def _params_for_SRV(self, record):
        for value in record.values:
            data = '{} {} {} {}'.format(value.priority, value.weight,
                                        value.port, value.target)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }
    _params_for_TXT = _params_for_multiple
    def _apply_Create(self, zone_id, change):
        new = change.new
        params_for = getattr(self, '_params_for_{}'.format(new._type))
        for params in params_for(new):
            self._client.zone_record_create(zone_id, params['name'],
                                            params['type'], params['value'],
                                            params['ttl'])
    def _apply_Update(self, zone_id, change):
        # It's way simpler to delete-then-recreate than to update
        self._apply_Delete(zone_id, change)
        self._apply_Create(zone_id, change)
    def _apply_Delete(self, zone_id, change):
        existing = change.existing
        zone = existing.zone
        # Delete every remote record matching the octoDNS record's
        # name/type pair (one per value).
        for record in self.zone_records(zone):
            if existing.name == record['name'] and \
                    existing._type == record['type']:
                self._client.zone_record_delete(zone_id, record['id'])
    def _apply(self, plan):
        """Apply plan's changes, creating the zone if it doesn't exist."""
        desired = plan.desired
        changes = plan.changes
        self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                       len(changes))
        try:
            zone_id = self.zone_metadata(zone_name=desired.name)['id']
        except HetznerClientNotFound:
            self.log.debug('_apply: no matching zone, creating domain')
            zone_id = self._client.zone_create(desired.name[:-1])['id']
        for change in changes:
            class_name = change.__class__.__name__
            getattr(self, '_apply_{}'.format(class_name))(zone_id, change)
        # Clear out the cache if any
        self._zone_records.pop(desired.name, None)

+ 6
- 5
octodns/provider/mythicbeasts.py View File

@ -11,6 +11,7 @@ from requests import Session
from logging import getLogger
from ..record import Record
from . import ProviderException
from .base import BaseProvider
from collections import defaultdict
@ -34,7 +35,7 @@ def remove_trailing_dot(value):
return value[:-1]
class MythicBeastsUnauthorizedException(Exception):
class MythicBeastsUnauthorizedException(ProviderException):
def __init__(self, zone, *args):
self.zone = zone
self.message = 'Mythic Beasts unauthorized for zone: {}'.format(
@ -45,7 +46,7 @@ class MythicBeastsUnauthorizedException(Exception):
self.message, self.zone, *args)
class MythicBeastsRecordException(Exception):
class MythicBeastsRecordException(ProviderException):
def __init__(self, zone, command, *args):
self.zone = zone
self.command = command
@ -70,13 +71,13 @@ class MythicBeastsProvider(BaseProvider):
...
mythicbeasts:
class: octodns.provider.mythicbeasts.MythicBeastsProvider
passwords:
my.domain.: 'password'
passwords:
my.domain.: 'DNS API v1 password'
zones:
my.domain.:
targets:
- mythic
- mythicbeasts
'''
RE_MX = re.compile(r'^(?P<preference>[0-9]+)\s+(?P<exchange>\S+)$',


+ 289
- 96
octodns/provider/ns1.py View File

@ -17,10 +17,15 @@ from uuid import uuid4
from six import text_type
from ..record import Record, Update
from . import ProviderException
from .base import BaseProvider
class Ns1Exception(Exception):
def _ensure_endswith_dot(string):
return string if string.endswith('.') else '{}.'.format(string)
class Ns1Exception(ProviderException):
pass
@ -76,9 +81,48 @@ class Ns1Client(object):
self._datasource = client.datasource()
self._datafeed = client.datafeed()
self.reset_caches()
def reset_caches(self):
self._datasource_id = None
self._feeds_for_monitors = None
self._monitors_cache = None
self._notifylists_cache = None
self._zones_cache = {}
self._records_cache = {}
def update_record_cache(func):
def call(self, zone, domain, _type, **params):
if zone in self._zones_cache:
# remove record's zone from cache
del self._zones_cache[zone]
cached = self._records_cache.setdefault(zone, {}) \
.setdefault(domain, {})
if _type in cached:
# remove record from cache
del cached[_type]
            # write record to cache if it's not a delete
new_record = func(self, zone, domain, _type, **params)
if new_record:
cached[_type] = new_record
return new_record
return call
def read_or_set_record_cache(func):
def call(self, zone, domain, _type):
cached = self._records_cache.setdefault(zone, {}) \
.setdefault(domain, {})
if _type not in cached:
cached[_type] = func(self, zone, domain, _type)
return cached[_type]
return call
@property
def datasource_id(self):
@ -121,6 +165,14 @@ class Ns1Client(object):
{m['id']: m for m in self.monitors_list()}
return self._monitors_cache
@property
def notifylists(self):
if self._notifylists_cache is None:
self.log.debug('notifylists: fetching & building')
self._notifylists_cache = \
{l['name']: l for l in self.notifylists_list()}
return self._notifylists_cache
def datafeed_create(self, sourceid, name, config):
ret = self._try(self._datafeed.create, sourceid, name, config)
self.feeds_for_monitors[config['jobid']] = ret['id']
@ -163,31 +215,45 @@ class Ns1Client(object):
return ret
def notifylists_delete(self, nlid):
for name, nl in self.notifylists.items():
if nl['id'] == nlid:
del self._notifylists_cache[name]
break
return self._try(self._notifylists.delete, nlid)
def notifylists_create(self, **body):
return self._try(self._notifylists.create, body)
nl = self._try(self._notifylists.create, body)
# cache it
self.notifylists[nl['name']] = nl
return nl
def notifylists_list(self):
return self._try(self._notifylists.list)
@update_record_cache
def records_create(self, zone, domain, _type, **params):
return self._try(self._records.create, zone, domain, _type, **params)
@update_record_cache
def records_delete(self, zone, domain, _type):
return self._try(self._records.delete, zone, domain, _type)
@read_or_set_record_cache
def records_retrieve(self, zone, domain, _type):
return self._try(self._records.retrieve, zone, domain, _type)
@update_record_cache
def records_update(self, zone, domain, _type, **params):
return self._try(self._records.update, zone, domain, _type, **params)
def zones_create(self, name):
return self._try(self._zones.create, name)
self._zones_cache[name] = self._try(self._zones.create, name)
return self._zones_cache[name]
def zones_retrieve(self, name):
return self._try(self._zones.retrieve, name)
if name not in self._zones_cache:
self._zones_cache[name] = self._try(self._zones.retrieve, name)
return self._zones_cache[name]
def _try(self, method, *args, **kwargs):
tries = self.retry_count
@ -216,6 +282,13 @@ class Ns1Provider(BaseProvider):
# Only required if using dynamic records
monitor_regions:
- lga
# Optional. Default: false. true is Recommended, but not the default
# for backwards compatibility reasons. If true, all NS1 monitors will
# use a shared notify list rather than one per record & value
# combination. See CHANGELOG,
# https://github.com/octodns/octodns/blob/master/CHANGELOG.md, for more
# information before enabling this behavior.
shared_notifylist: false
# Optional. Default: None. If set, back off in advance to avoid 429s
# from rate-limiting. Generally this should be set to the number
# of processes or workers hitting the API, e.g. the value of
@ -233,10 +306,12 @@ class Ns1Provider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS_MUTLIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
SHARED_NOTIFYLIST_NAME = 'octoDNS NS1 Notify List'
def _update_filter(self, filter, with_disabled):
if with_disabled:
@ -341,6 +416,9 @@ class Ns1Provider(BaseProvider):
'ASIAPAC': 'AS',
'EUROPE': 'EU',
'SOUTH-AMERICA': 'SA',
# continent NA has been handled as part of Geofence Country filter
# starting from v0.9.13. These below US-* just need to continue to
        # exist here so it doesn't break the upgrade path
'US-CENTRAL': 'NA',
'US-EAST': 'NA',
'US-WEST': 'NA',
@ -350,8 +428,6 @@ class Ns1Provider(BaseProvider):
'AS': ('ASIAPAC',),
'EU': ('EUROPE',),
'SA': ('SOUTH-AMERICA',),
# TODO: what about CA, MX, and all the other NA countries?
'NA': ('US-CENTRAL', 'US-EAST', 'US-WEST'),
}
# Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS
@ -359,10 +435,16 @@ class Ns1Provider(BaseProvider):
'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU',
'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN',
'TK', 'TO', 'TV', 'WF', 'WS'},
'NA': {'DO', 'DM', 'BB', 'BL', 'BM', 'HT', 'KN', 'JM', 'VC', 'HN',
'BS', 'BZ', 'PR', 'NI', 'LC', 'TT', 'VG', 'PA', 'TC', 'PM',
'GT', 'AG', 'GP', 'AI', 'VI', 'CA', 'GD', 'AW', 'CR', 'GL',
'CU', 'MF', 'SV', 'US', 'MQ', 'MS', 'KY', 'MX', 'CW', 'BQ',
'SX', 'UM'}
}
def __init__(self, id, api_key, retry_count=4, monitor_regions=None,
parallelism=None, client_config=None, *args, **kwargs):
parallelism=None, client_config=None, shared_notifylist=False,
*args, **kwargs):
self.log = getLogger('Ns1Provider[{}]'.format(id))
self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, '
'monitor_regions=%s, parallelism=%s, client_config=%s',
@ -370,6 +452,7 @@ class Ns1Provider(BaseProvider):
client_config)
super(Ns1Provider, self).__init__(id, *args, **kwargs)
self.monitor_regions = monitor_regions
self.shared_notifylist = shared_notifylist
self._client = Ns1Client(api_key, parallelism, retry_count,
client_config)
@ -406,7 +489,7 @@ class Ns1Provider(BaseProvider):
for piece in note.split(' '):
try:
k, v = piece.split(':', 1)
data[k] = v
data[k] = v if v != '' else None
except ValueError:
pass
return data
@ -464,10 +547,10 @@ class Ns1Provider(BaseProvider):
pass
return pool_name
def _data_for_dynamic_A(self, _type, record):
def _data_for_dynamic(self, _type, record):
# First make sure we have the expected filters config
if not self._valid_filter_config(record['filters'], record['domain']):
self.log.error('_data_for_dynamic_A: %s %s has unsupported '
self.log.error('_data_for_dynamic: %s %s has unsupported '
'filters', record['domain'], _type)
raise Ns1Exception('Unrecognized advanced record')
@ -479,31 +562,45 @@ class Ns1Provider(BaseProvider):
# region.
pools = defaultdict(lambda: {'fallback': None, 'values': []})
for answer in record['answers']:
# region (group name in the UI) is the pool name
pool_name = answer['region']
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
pool = pools[pool_name]
meta = answer['meta']
notes = self._parse_notes(meta.get('note', ''))
value = text_type(answer['answer'][0])
if meta['priority'] == 1:
# priority 1 means this answer is part of the pools own values
value_dict = {
'value': value,
'weight': int(meta.get('weight', 1)),
}
# If we have the original pool name and the catchall pool name
# in the answers, they point at the same pool. Add values only
# once
if value_dict not in pool['values']:
pool['values'].append(value_dict)
if notes.get('from', False) == '--default--':
# It's a final/default value, record it and move on
default.add(value)
continue
# NS1 pool names can be found in notes > v0.9.11, in order to allow
# us to find fallback-only pools/values. Before that we used
# `region` (group name in the UI) and only paid attention to
# priority=1 (first level)
notes_pool_name = notes.get('pool', None)
if notes_pool_name is None:
# < v0.9.11
if meta['priority'] != 1:
# Ignore all but priority 1
continue
# And use region's pool name as the pool name
pool_name = self._parse_dynamic_pool_name(answer['region'])
else:
# It's a fallback, we only care about it if it's a
# final/default
notes = self._parse_notes(meta.get('note', ''))
if notes.get('from', False) == '--default--':
default.add(value)
# > v0.9.11, use the notes-based name and consider all values
pool_name = notes_pool_name
pool = pools[pool_name]
value_dict = {
'value': value,
'weight': int(meta.get('weight', 1)),
}
if value_dict not in pool['values']:
# If we haven't seen this value before add it to the pool
pool['values'].append(value_dict)
# If there's a fallback recorded in the value for its pool go ahead
# and use it, another v0.9.11 thing
fallback = notes.get('fallback', None)
if fallback is not None:
pool['fallback'] = fallback
# The regions objects map to rules, but it's a bit fuzzy since they're
# tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
@ -528,55 +625,61 @@ class Ns1Provider(BaseProvider):
rules[rule_order] = rule
# The group notes field in the UI is a `note` on the region here,
# that's where we can find our pool's fallback.
# that's where we can find our pool's fallback in < v0.9.11 anyway
if 'fallback' in notes:
# set the fallback pool name
pools[pool_name]['fallback'] = notes['fallback']
geos = set()
# continents are mapped (imperfectly) to regions, but what about
# Canada/North America
for georegion in meta.get('georegion', []):
geos.add(self._REGION_TO_CONTINENT[georegion])
# Countries are easy enough to map, we just have to find their
# continent
#
# NOTE: Special handling for Oceania
# NS1 doesn't support Oceania as a region. So the Oceania countries
# will be present in meta['country']. If all the countries in the
# Oceania countries list are found, set the region to OC and remove
# individual oceania country entries
oc_countries = set()
# NOTE: Some continents need special handling since NS1
            # does not support them as regions. These are defined under
# _CONTINENT_TO_LIST_OF_COUNTRIES. So the countries for these
# regions will be present in meta['country']. If all the countries
# in _CONTINENT_TO_LIST_OF_COUNTRIES[<region>] list are found,
# set the continent as the region and remove individual countries
special_continents = dict()
for country in meta.get('country', []):
# country_alpha2_to_continent_code fails for Pitcairn ('PN')
# country_alpha2_to_continent_code fails for Pitcairn ('PN'),
# United States Minor Outlying Islands ('UM') and
# Sint Maarten ('SX')
if country == 'PN':
con = 'OC'
elif country in ['SX', 'UM']:
con = 'NA'
else:
con = country_alpha2_to_continent_code(country)
if con == 'OC':
oc_countries.add(country)
if con in self._CONTINENT_TO_LIST_OF_COUNTRIES:
special_continents.setdefault(con, set()).add(country)
else:
# Adding only non-OC countries here to geos
geos.add('{}-{}'.format(con, country))
if oc_countries:
if oc_countries == self._CONTINENT_TO_LIST_OF_COUNTRIES['OC']:
# All OC countries found, so add 'OC' to geos
geos.add('OC')
for continent, countries in special_continents.items():
if countries == self._CONTINENT_TO_LIST_OF_COUNTRIES[
continent]:
# All countries found, so add it to geos
geos.add(continent)
else:
# Partial OC countries found, just add them as-is to geos
for c in oc_countries:
geos.add('{}-{}'.format('OC', c))
# Partial countries found, so just add them as-is to geos
for c in countries:
geos.add('{}-{}'.format(continent, c))
# States are easy too, just assume NA-US (CA providences aren't
# supported by octoDNS currently)
# States and provinces are easy too,
# just assume NA-US or NA-CA
for state in meta.get('us_state', []):
geos.add('NA-US-{}'.format(state))
for province in meta.get('ca_province', []):
geos.add('NA-CA-{}'.format(province))
if geos:
# There are geos, combine them with any existing geos for this
# pool and recorded the sorted unique set of them
@ -588,16 +691,22 @@ class Ns1Provider(BaseProvider):
rules = list(rules.values())
rules.sort(key=lambda r: (r['_order'], r['pool']))
return {
data = {
'dynamic': {
'pools': pools,
'rules': rules,
},
'ttl': record['ttl'],
'type': _type,
'values': sorted(default),
}
if _type == 'CNAME':
data['value'] = default[0]
else:
data['values'] = default
return data
def _data_for_A(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced record, see if it's first answer has a note
@ -607,7 +716,7 @@ class Ns1Provider(BaseProvider):
first_answer_note = ''
# If that note includes a `from` (pool name) it's a dynamic record
if 'from:' in first_answer_note:
return self._data_for_dynamic_A(_type, record)
return self._data_for_dynamic(_type, record)
# If not it's an old geo record
return self._data_for_geo_A(_type, record)
@ -646,6 +755,10 @@ class Ns1Provider(BaseProvider):
}
def _data_for_CNAME(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced dynamic record
return self._data_for_dynamic(_type, record)
try:
value = record['short_answers'][0]
except IndexError:
@ -657,7 +770,6 @@ class Ns1Provider(BaseProvider):
}
_data_for_ALIAS = _data_for_CNAME
_data_for_PTR = _data_for_CNAME
def _data_for_MX(self, _type, record):
values = []
@ -696,10 +808,11 @@ class Ns1Provider(BaseProvider):
return {
'ttl': record['ttl'],
'type': _type,
'values': [a if a.endswith('.') else '{}.'.format(a)
for a in record['short_answers']],
'values': record['short_answers'],
}
_data_for_PTR = _data_for_NS
def _data_for_SRV(self, _type, record):
values = []
for answer in record['short_answers']:
@ -716,6 +829,23 @@ class Ns1Provider(BaseProvider):
'values': values,
}
def _data_for_URLFWD(self, _type, record):
values = []
for answer in record['short_answers']:
path, target, code, masking, query = answer.split(' ', 4)
values.append({
'path': path,
'target': target,
'code': code,
'masking': masking,
'query': query,
})
return {
'ttl': record['ttl'],
'type': _type,
'values': values,
}
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s',
zone.name,
@ -732,9 +862,10 @@ class Ns1Provider(BaseProvider):
for record in ns1_zone['records']:
if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR',
'SRV']:
for i, a in enumerate(record['short_answers']):
if not a.endswith('.'):
record['short_answers'][i] = '{}.'.format(a)
record['short_answers'] = [
_ensure_endswith_dot(a)
for a in record['short_answers']
]
if record.get('tier', 1) > 1:
# Need to get the full record data for geo records
@ -817,11 +948,17 @@ class Ns1Provider(BaseProvider):
for monitor in self._client.monitors.values():
data = self._parse_notes(monitor['notes'])
if not data:
continue
if expected_host == data['host'] and \
expected_type == data['type']:
# This monitor does not belong to this record
config = monitor['config']
value = config['host']
if record._type == 'CNAME':
# Append a trailing dot for CNAME records so that
# lookup by a CNAME answer works
value = value + '.'
monitors[value] = monitor
return monitors
@ -832,7 +969,6 @@ class Ns1Provider(BaseProvider):
def _feed_create(self, monitor):
monitor_id = monitor['id']
self.log.debug('_feed_create: monitor=%s', monitor_id)
# TODO: looks like length limit is 64 char
name = '{} - {}'.format(monitor['name'], self._uuid()[:6])
# Create the data feed
@ -846,22 +982,36 @@ class Ns1Provider(BaseProvider):
return feed_id
def _notifylists_find_or_create(self, name):
self.log.debug('_notifylists_find_or_create: name="%s"', name)
try:
nl = self._client.notifylists[name]
self.log.debug('_notifylists_find_or_create: existing=%s',
nl['id'])
except KeyError:
notify_list = [{
'config': {
'sourceid': self._client.datasource_id,
},
'type': 'datafeed',
}]
nl = self._client.notifylists_create(name=name,
notify_list=notify_list)
self.log.debug('_notifylists_find_or_create: created=%s',
nl['id'])
return nl
def _monitor_create(self, monitor):
self.log.debug('_monitor_create: monitor="%s"', monitor['name'])
# Create the notify list
notify_list = [{
'config': {
'sourceid': self._client.datasource_id,
},
'type': 'datafeed',
}]
nl = self._client.notifylists_create(name=monitor['name'],
notify_list=notify_list)
nl_id = nl['id']
self.log.debug('_monitor_create: notify_list=%s', nl_id)
# Find the right notifylist
nl_name = self.SHARED_NOTIFYLIST_NAME \
if self.shared_notifylist else monitor['name']
nl = self._notifylists_find_or_create(nl_name)
# Create the monitor
monitor['notify_list'] = nl_id
monitor['notify_list'] = nl['id']
monitor = self._client.monitors_create(**monitor)
monitor_id = monitor['id']
self.log.debug('_monitor_create: monitor=%s', monitor_id)
@ -872,6 +1022,10 @@ class Ns1Provider(BaseProvider):
host = record.fqdn[:-1]
_type = record._type
if _type == 'CNAME':
# NS1 does not accept a host value with a trailing dot
value = value[:-1]
ret = {
'active': True,
'config': {
@ -894,10 +1048,13 @@ class Ns1Provider(BaseProvider):
'regions': self.monitor_regions,
}
if _type == 'AAAA':
ret['config']['ipv6'] = True
if record.healthcheck_protocol != 'TCP':
# IF it's HTTP we need to send the request string
path = record.healthcheck_path
host = record.healthcheck_host
host = record.healthcheck_host(value=value)
request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \
r'User-agent: NS1\r\n\r\n'.format(path=path, host=host)
ret['config']['send'] = request
@ -968,7 +1125,13 @@ class Ns1Provider(BaseProvider):
self._client.monitors_delete(monitor_id)
notify_list_id = monitor['notify_list']
self._client.notifylists_delete(notify_list_id)
for nl_name, nl in self._client.notifylists.items():
if nl['id'] == notify_list_id:
# We've found the that might need deleting
if nl['name'] != self.SHARED_NOTIFYLIST_NAME:
# It's not shared so is safe to delete
self._client.notifylists_delete(notify_list_id)
break
def _add_answers_for_pool(self, answers, default_answers, pool_name,
pool_label, pool_answers, pools, priority):
@ -978,12 +1141,15 @@ class Ns1Provider(BaseProvider):
seen.add(current_pool_name)
pool = pools[current_pool_name]
for answer in pool_answers[current_pool_name]:
fallback = pool.data['fallback']
answer = {
'answer': answer['answer'],
'meta': {
'priority': priority,
'note': self._encode_notes({
'from': pool_label,
'pool': current_pool_name,
'fallback': fallback or '',
}),
'up': {
'feed': answer['feed_id'],
@ -1013,7 +1179,7 @@ class Ns1Provider(BaseProvider):
}
answers.append(answer)
def _params_for_dynamic_A(self, record):
def _params_for_dynamic(self, record):
pools = record.dynamic.pools
# Convert rules to regions
@ -1035,12 +1201,15 @@ class Ns1Provider(BaseProvider):
country = set()
georegion = set()
us_state = set()
ca_province = set()
for geo in rule.data.get('geos', []):
n = len(geo)
if n == 8:
# US state, e.g. NA-US-KY
us_state.add(geo[-2:])
# CA province, e.g. NA-CA-NL
us_state.add(geo[-2:]) if "NA-US" in geo \
else ca_province.add(geo[-2:])
# For filtering. State filtering is done by the country
# filter
has_country = True
@ -1073,7 +1242,7 @@ class Ns1Provider(BaseProvider):
'meta': georegion_meta,
}
if country or us_state:
if country or us_state or ca_province:
# If there's country and/or states its a country pool,
# countries and states can coexist as they're handled by the
# same step in the filterchain (countries and georegions
@ -1084,11 +1253,12 @@ class Ns1Provider(BaseProvider):
country_state_meta['country'] = sorted(country)
if us_state:
country_state_meta['us_state'] = sorted(us_state)
if ca_province:
country_state_meta['ca_province'] = sorted(ca_province)
regions['{}__country'.format(pool_name)] = {
'meta': country_state_meta,
}
if not georegion and not country and not us_state:
elif not georegion:
# If there's no targeting it's a catchall
regions['{}__catchall'.format(pool_name)] = {
'meta': meta,
@ -1099,25 +1269,35 @@ class Ns1Provider(BaseProvider):
# Build a list of primary values for each pool, including their
# feed_id (monitor)
value_feed = dict()
pool_answers = defaultdict(list)
for pool_name, pool in sorted(pools.items()):
for value in pool.data['values']:
weight = value['weight']
value = value['value']
existing = existing_monitors.get(value)
monitor_id, feed_id = self._monitor_sync(record, value,
existing)
active_monitors.add(monitor_id)
feed_id = value_feed.get(value)
# check for identical monitor and skip creating one if found
if not feed_id:
existing = existing_monitors.get(value)
monitor_id, feed_id = self._monitor_sync(record, value,
existing)
value_feed[value] = feed_id
active_monitors.add(monitor_id)
pool_answers[pool_name].append({
'answer': [value],
'weight': weight,
'feed_id': feed_id,
})
if record._type == 'CNAME':
default_values = [record.value]
else:
default_values = record.values
default_answers = [{
'answer': [v],
'weight': 1,
} for v in record.values]
} for v in default_values]
# Build our list of answers
# The regions dictionary built above already has the required pool
@ -1146,7 +1326,7 @@ class Ns1Provider(BaseProvider):
def _params_for_A(self, record):
if getattr(record, 'dynamic', False):
return self._params_for_dynamic_A(record)
return self._params_for_dynamic(record)
elif hasattr(record, 'geo'):
return self._params_for_geo_A(record)
@ -1171,12 +1351,13 @@ class Ns1Provider(BaseProvider):
values = [(v.flags, v.tag, v.value) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
# TODO: dynamic CNAME support
def _params_for_CNAME(self, record):
if getattr(record, 'dynamic', False):
return self._params_for_dynamic(record)
return {'answers': [record.value], 'ttl': record.ttl}, None
_params_for_ALIAS = _params_for_CNAME
_params_for_PTR = _params_for_CNAME
def _params_for_MX(self, record):
values = [(v.preference, v.exchange) for v in record.values]
@ -1187,11 +1368,22 @@ class Ns1Provider(BaseProvider):
v.replacement) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _params_for_PTR(self, record):
return {
'answers': record.values,
'ttl': record.ttl,
}, None
def _params_for_SRV(self, record):
values = [(v.priority, v.weight, v.port, v.target)
for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _params_for_URLFWD(self, record):
values = [(v.path, v.target, v.code, v.masking, v.query)
for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
def _get_ns1_filters(self, ns1_zone_name):
ns1_filters = {}
ns1_zone = {}
@ -1250,8 +1442,7 @@ class Ns1Provider(BaseProvider):
extra.append(Update(record, record))
continue
for have in self._monitors_for(record).values():
value = have['config']['host']
for value, have in self._monitors_for(record).items():
expected = self._monitor_gen(record, value)
# TODO: find values which have missing monitors
if not self._monitor_is_match(expected, have):
@ -1285,7 +1476,9 @@ class Ns1Provider(BaseProvider):
params, active_monitor_ids = \
getattr(self, '_params_for_{}'.format(_type))(new)
self._client.records_update(zone, domain, _type, **params)
self._monitors_gc(new, active_monitor_ids)
# If we're cleaning up we need to send in the old record since it'd
# have anything that needs cleaning up
self._monitors_gc(change.existing, active_monitor_ids)
def _apply_Delete(self, ns1_zone, change):
existing = change.existing


+ 7
- 3
octodns/provider/ovh.py View File

@ -370,11 +370,15 @@ class OvhProvider(BaseProvider):
@staticmethod
def _is_valid_dkim_key(key):
result = True
base64_decode = getattr(base64, 'decodestring', None)
base64_decode = getattr(base64, 'decodebytes', base64_decode)
try:
base64.decodestring(bytearray(key, 'utf-8'))
result = base64_decode(bytearray(key, 'utf-8'))
except binascii.Error:
return False
return True
result = False
return result
def get_records(self, zone_name):
"""


+ 1
- 1
octodns/provider/plan.py View File

@ -50,7 +50,7 @@ class Plan(object):
except AttributeError:
existing_n = 0
self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d'
self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d '
'Existing=%d',
self.change_counts['Create'],
self.change_counts['Update'],


+ 61
- 3
octodns/provider/powerdns.py View File

@ -6,6 +6,7 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from requests import HTTPError, Session
from operator import itemgetter
import logging
from ..record import Create, Record
@ -15,8 +16,8 @@ from .base import BaseProvider
class PowerDnsBaseProvider(BaseProvider):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS',
'PTR', 'SPF', 'SSHFP', 'SRV', 'TXT'))
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'LOC', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SSHFP', 'SRV', 'TXT'))
TIMEOUT = 5
def __init__(self, id, host, api_key, port=8081,
@ -102,6 +103,33 @@ class PowerDnsBaseProvider(BaseProvider):
_data_for_SPF = _data_for_quoted
_data_for_TXT = _data_for_quoted
def _data_for_LOC(self, rrset):
values = []
for record in rrset['records']:
lat_degrees, lat_minutes, lat_seconds, lat_direction, \
long_degrees, long_minutes, long_seconds, long_direction, \
altitude, size, precision_horz, precision_vert = \
record['content'].replace('m', '').split(' ', 11)
values.append({
'lat_degrees': int(lat_degrees),
'lat_minutes': int(lat_minutes),
'lat_seconds': float(lat_seconds),
'lat_direction': lat_direction,
'long_degrees': int(long_degrees),
'long_minutes': int(long_minutes),
'long_seconds': float(long_seconds),
'long_direction': long_direction,
'altitude': float(altitude),
'size': float(size),
'precision_horz': float(precision_horz),
'precision_vert': float(precision_vert),
})
return {
'ttl': rrset['ttl'],
'type': rrset['type'],
'values': values
}
def _data_for_MX(self, rrset):
values = []
for record in rrset['records']:
@ -183,7 +211,10 @@ class PowerDnsBaseProvider(BaseProvider):
version = resp.json()['version']
self.log.debug('powerdns_version: got version %s from server',
version)
self._powerdns_version = [int(p) for p in version.split('.')]
# The extra `-` split is to handle pre-release and source built
# versions like 4.5.0-alpha0.435.master.gcb114252b
self._powerdns_version = [
int(p.split('-')[0]) for p in version.split('.')[:3]]
return self._powerdns_version
@ -282,6 +313,27 @@ class PowerDnsBaseProvider(BaseProvider):
_records_for_SPF = _records_for_quoted
_records_for_TXT = _records_for_quoted
def _records_for_LOC(self, record):
return [{
'content':
'%d %d %0.3f %s %d %d %.3f %s %0.2fm %0.2fm %0.2fm %0.2fm' %
(
int(v.lat_degrees),
int(v.lat_minutes),
float(v.lat_seconds),
v.lat_direction,
int(v.long_degrees),
int(v.long_minutes),
float(v.long_seconds),
v.long_direction,
float(v.altitude),
float(v.size),
float(v.precision_horz),
float(v.precision_vert)
),
'disabled': False
} for v in record.values]
def _records_for_MX(self, record):
return [{
'content': '{} {}'.format(v.preference, v.exchange),
@ -378,6 +430,12 @@ class PowerDnsBaseProvider(BaseProvider):
for change in changes:
class_name = change.__class__.__name__
mods.append(getattr(self, '_mod_{}'.format(class_name))(change))
# Ensure that any DELETE modifications always occur before any REPLACE
# modifications. This ensures that an A record can be replaced by a
# CNAME record and vice-versa.
mods.sort(key=itemgetter('changetype'))
self.log.debug('_apply: sending change request')
try:


+ 51
- 7
octodns/provider/route53.py View File

@ -19,6 +19,7 @@ from six import text_type
from ..equality import EqualityTupleMixin
from ..record import Record, Update
from ..record.geo import GeoCodes
from . import ProviderException
from .base import BaseProvider
octal_re = re.compile(r'\\(\d\d\d)')
@ -512,7 +513,7 @@ class _Route53GeoRecord(_Route53Record):
self.values)
class Route53ProviderException(Exception):
class Route53ProviderException(ProviderException):
pass
@ -924,6 +925,43 @@ class Route53Provider(BaseProvider):
return data
def _process_desired_zone(self, desired):
for record in desired.records:
if getattr(record, 'dynamic', False):
# Make a copy of the record in case we have to muck with it
dynamic = record.dynamic
rules = []
for i, rule in enumerate(dynamic.rules):
geos = rule.data.get('geos', [])
if not geos:
rules.append(rule)
continue
filtered_geos = [g for g in geos
if not g.startswith('NA-CA-')]
if not filtered_geos:
# We've removed all geos, we'll have to skip this rule
msg = 'NA-CA-* not supported for {}' \
.format(record.fqdn)
fallback = 'skipping rule {}'.format(i)
self.supports_warn_or_except(msg, fallback)
continue
elif geos != filtered_geos:
msg = 'NA-CA-* not supported for {}' \
.format(record.fqdn)
fallback = 'filtering rule {} from ({}) to ({})' \
.format(i, ', '.join(geos),
', '.join(filtered_geos))
self.supports_warn_or_except(msg, fallback)
rule.data['geos'] = filtered_geos
rules.append(rule)
if rules != dynamic.rules:
record = record.copy()
record.dynamic.rules = rules
desired.add_record(record, replace=True)
return super(Route53Provider, self)._process_desired_zone(desired)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
@ -1051,10 +1089,11 @@ class Route53Provider(BaseProvider):
health_check, value=None):
config = health_check['HealthCheckConfig']
# So interestingly Route53 normalizes IPAddress which will cause us to
# fail to find see things as equivalent. To work around this we'll
# ip_address's returned object for equivalence
# E.g 2001:4860:4860::8842 -> 2001:4860:4860:0:0:0:0:8842
# So interestingly Route53 normalizes IPv6 addresses to a funky, but
# valid, form which will cause us to fail to find see things as
# equivalent. To work around this we'll ip_address's returned objects
# for equivalence.
# E.g 2001:4860:4860:0:0:0:0:8842 -> 2001:4860:4860::8842
if value:
value = ip_address(text_type(value))
config_ip_address = ip_address(text_type(config['IPAddress']))
@ -1084,7 +1123,7 @@ class Route53Provider(BaseProvider):
try:
ip_address(text_type(value))
# We're working with an IP, host is the Host header
healthcheck_host = record.healthcheck_host
healthcheck_host = record.healthcheck_host(value=value)
except (AddressValueError, ValueError):
# This isn't an IP, host is the value, value should be None
healthcheck_host = value
@ -1253,7 +1292,12 @@ class Route53Provider(BaseProvider):
return self._gen_mods('DELETE', existing_records, existing_rrsets)
def _extra_changes_update_needed(self, record, rrset):
healthcheck_host = record.healthcheck_host
if record._type == 'CNAME':
# For CNAME, healthcheck host by default points to the CNAME value
healthcheck_host = rrset['ResourceRecords'][0]['Value']
else:
healthcheck_host = record.healthcheck_host()
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port


+ 2
- 1
octodns/provider/selectel.py View File

@ -12,6 +12,7 @@ from logging import getLogger
from requests import Session
from ..record import Record, Update
from . import ProviderException
from .base import BaseProvider
@ -20,7 +21,7 @@ def escape_semicolon(s):
return s.replace(';', '\\;')
class SelectelAuthenticationRequired(Exception):
class SelectelAuthenticationRequired(ProviderException):
def __init__(self, msg):
message = 'Authorization failed. Invalid or empty token.'
super(SelectelAuthenticationRequired, self).__init__(message)


+ 4
- 3
octodns/provider/transip.py View File

@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function, \
from suds import WebFault
from collections import defaultdict
from . import ProviderException
from .base import BaseProvider
from logging import getLogger
from ..record import Record
@ -15,7 +16,7 @@ from transip.service.domain import DomainService
from transip.service.objects import DnsEntry
class TransipException(Exception):
class TransipException(ProviderException):
pass
@ -49,8 +50,8 @@ class TransipProvider(BaseProvider):
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(
('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'SPF', 'TXT', 'SSHFP', 'CAA'))
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'SPF', 'TXT',
'SSHFP', 'CAA'))
# unsupported by OctoDNS: 'TLSA'
MIN_TTL = 120
TIMEOUT = 15


+ 34
- 13
octodns/provider/ultra.py View File

@ -1,13 +1,13 @@
from collections import defaultdict
from ipaddress import ip_address
from logging import getLogger
from requests import Session
from ..record import Record
from . import ProviderException
from .base import BaseProvider
class UltraClientException(Exception):
class UltraClientException(ProviderException):
'''
Base Ultra exception type
'''
@ -36,12 +36,12 @@ class UltraProvider(BaseProvider):
'''
Neustar UltraDNS provider
Documentation for Ultra REST API requires a login:
https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf
Implemented to the May 20, 2020 version of the document (dated on page ii)
Also described as Version 2.83.0 (title page)
Documentation for Ultra REST API:
https://ultra-portalstatic.ultradns.com/static/docs/REST-API_User_Guide.pdf
Implemented to the May 26, 2021 version of the document (dated on page ii)
Also described as Version 3.18.0 (title page)
Tested against 3.0.0-20200627220036.81047f5
Tested against 3.20.1-20210521075351.36b9297
As determined by querying https://api.ultradns.com/version
ultra:
@ -57,6 +57,7 @@ class UltraProvider(BaseProvider):
RECORDS_TO_TYPE = {
'A (1)': 'A',
'AAAA (28)': 'AAAA',
'APEXALIAS (65282)': 'ALIAS',
'CAA (257)': 'CAA',
'CNAME (5)': 'CNAME',
'MX (15)': 'MX',
@ -72,6 +73,7 @@ class UltraProvider(BaseProvider):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
TIMEOUT = 5
ZONE_REQUEST_LIMIT = 100
def _request(self, method, path, params=None,
data=None, json=None, json_response=True):
@ -151,7 +153,7 @@ class UltraProvider(BaseProvider):
def zones(self):
if self._zones is None:
offset = 0
limit = 100
limit = self.ZONE_REQUEST_LIMIT
zones = []
paging = True
while paging:
@ -194,8 +196,6 @@ class UltraProvider(BaseProvider):
}
def _data_for_AAAA(self, _type, records):
for i, v in enumerate(records['rdata']):
records['rdata'][i] = str(ip_address(v))
return {
'ttl': records['ttl'],
'type': _type,
@ -211,6 +211,7 @@ class UltraProvider(BaseProvider):
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
_data_for_ALIAS = _data_for_single
def _data_for_CAA(self, _type, records):
return {
@ -287,7 +288,13 @@ class UltraProvider(BaseProvider):
name = zone.hostname_from_fqdn(record['ownerName'])
if record['rrtype'] == 'SOA (6)':
continue
_type = self.RECORDS_TO_TYPE[record['rrtype']]
try:
_type = self.RECORDS_TO_TYPE[record['rrtype']]
except KeyError:
self.log.warning('populate: ignoring record with '
'unsupported rrtype, %s %s',
name, record['rrtype'])
continue
values[name][_type] = record
for name, types in values.items():
@ -368,6 +375,7 @@ class UltraProvider(BaseProvider):
}
_contents_for_PTR = _contents_for_CNAME
_contents_for_ALIAS = _contents_for_CNAME
def _contents_for_SRV(self, record):
return {
@ -395,8 +403,15 @@ class UltraProvider(BaseProvider):
def _gen_data(self, record):
zone_name = self._remove_prefix(record.fqdn, record.name + '.')
# UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
if record._type == "ALIAS":
record_type = "APEXALIAS"
else:
record_type = record._type
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
record._type,
record_type,
record.fqdn)
contents_for = getattr(self, '_contents_for_{}'.format(record._type))
return path, contents_for(record)
@ -438,7 +453,13 @@ class UltraProvider(BaseProvider):
existing._type == self.RECORDS_TO_TYPE[record['rrtype']]:
zone_name = self._remove_prefix(existing.fqdn,
existing.name + '.')
# UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
existing_type = existing._type
if existing_type == "ALIAS":
existing_type = "APEXALIAS"
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
existing._type,
existing_type,
existing.fqdn)
self._delete(path, json_response=False)

+ 8
- 4
octodns/provider/yaml.py View File

@ -104,8 +104,10 @@ class YamlProvider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS',
'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT'))
SUPPORTS_MUTLIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'DNAME', 'LOC', 'MX',
'NAPTR', 'NS', 'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT',
'URLFWD'))
def __init__(self, id, directory, default_ttl=3600, enforce_order=True,
populate_should_replace=False, *args, **kwargs):
@ -239,11 +241,13 @@ class SplitYamlProvider(YamlProvider):
# instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
def __init__(self, id, directory, *args, **kwargs):
def __init__(self, id, directory, extension='.', *args, **kwargs):
super(SplitYamlProvider, self).__init__(id, directory, *args, **kwargs)
self.extension = extension
def _zone_directory(self, zone):
return join(self.directory, zone.name)
filename = '{}{}'.format(zone.name[:-1], self.extension)
return join(self.directory, filename)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,


+ 372
- 20
octodns/record/__init__.py View File

@ -10,6 +10,7 @@ from logging import getLogger
import re
from six import string_types, text_type
from fqdn import FQDN
from ..equality import EqualityTupleMixin
from .geo import GeoCodes
@ -95,6 +96,8 @@ class Record(EqualityTupleMixin):
'ALIAS': AliasRecord,
'CAA': CaaRecord,
'CNAME': CnameRecord,
'DNAME': DnameRecord,
'LOC': LocRecord,
'MX': MxRecord,
'NAPTR': NaptrRecord,
'NS': NsRecord,
@ -103,6 +106,7 @@ class Record(EqualityTupleMixin):
'SRV': SrvRecord,
'SSHFP': SshfpRecord,
'TXT': TxtRecord,
'URLFWD': UrlfwdRecord,
}[_type]
except KeyError:
raise Exception('Unknown record type: "{}"'.format(_type))
@ -125,10 +129,11 @@ class Record(EqualityTupleMixin):
if n > 253:
reasons.append('invalid fqdn, "{}" is too long at {} chars, max '
'is 253'.format(fqdn, n))
n = len(name)
if n > 63:
reasons.append('invalid name, "{}" is too long at {} chars, max '
'is 63'.format(name, n))
for label in name.split('.'):
n = len(label)
if n > 63:
reasons.append('invalid label, "{}" is too long at {} chars, '
'max is 63'.format(label, n))
try:
ttl = int(data['ttl'])
if ttl < 0:
@ -179,15 +184,11 @@ class Record(EqualityTupleMixin):
def included(self):
return self._octodns.get('included', [])
@property
def healthcheck_host(self):
def healthcheck_host(self, value=None):
healthcheck = self._octodns.get('healthcheck', {})
if healthcheck.get('protocol', None) == 'TCP':
return None
try:
return healthcheck['host']
except KeyError:
return self.fqdn[:-1]
return healthcheck.get('host', self.fqdn[:-1]) or value
@property
def healthcheck_path(self):
@ -218,6 +219,18 @@ class Record(EqualityTupleMixin):
if self.ttl != other.ttl:
return Update(self, other)
def copy(self, zone=None):
data = self.data
data['type'] = self._type
return Record.new(
zone if zone else self.zone,
self.name,
data,
self.source,
lenient=True
)
# NOTE: we're using __hash__ and ordering methods that consider Records
# equivalent if they have the same name & _type. Values are ignored. This
# is useful when computing diffs/changes.
@ -401,6 +414,7 @@ class _ValueMixin(object):
class _DynamicPool(object):
log = getLogger('_DynamicPool')
def __init__(self, _id, data):
self._id = _id
@ -413,6 +427,15 @@ class _DynamicPool(object):
]
values.sort(key=lambda d: d['value'])
# normalize weight of a single-value pool
if len(values) == 1:
weight = data['values'][0].get('weight', 1)
if weight != 1:
self.log.warn(
'Using weight=1 instead of %s for single-value pool %s',
weight, _id)
values[0]['weight'] = 1
fallback = data.get('fallback', None)
self.data = {
'fallback': fallback if fallback != 'default' else None,
@ -515,6 +538,7 @@ class _DynamicMixin(object):
pools_exist = set()
pools_seen = set()
pools_seen_as_fallback = set()
if not isinstance(pools, dict):
reasons.append('pools must be a dict')
elif not pools:
@ -556,10 +580,17 @@ class _DynamicMixin(object):
reasons.append('missing value in pool "{}" '
'value {}'.format(_id, value_num))
if len(values) == 1 and values[0].get('weight', 1) != 1:
reasons.append('pool "{}" has single value with '
'weight!=1'.format(_id))
fallback = pool.get('fallback', None)
if fallback is not None and fallback not in pools:
reasons.append('undefined fallback "{}" for pool "{}"'
.format(fallback, _id))
if fallback is not None:
if fallback in pools:
pools_seen_as_fallback.add(fallback)
else:
reasons.append('undefined fallback "{}" for pool "{}"'
.format(fallback, _id))
# Check for loops
fallback = pools[_id].get('fallback', None)
@ -587,7 +618,6 @@ class _DynamicMixin(object):
else:
seen_default = False
# TODO: don't allow 'default' as a pool name, reserved
for i, rule in enumerate(rules):
rule_num = i + 1
try:
@ -608,7 +638,6 @@ class _DynamicMixin(object):
if pool not in pools:
reasons.append('rule {} undefined pool "{}"'
.format(rule_num, pool))
pools_seen.add(pool)
elif pool in pools_seen and geos:
reasons.append('rule {} invalid, target pool "{}" '
'reused'.format(rule_num, pool))
@ -628,7 +657,7 @@ class _DynamicMixin(object):
reasons.extend(GeoCodes.validate(geo, 'rule {} '
.format(rule_num)))
unused = pools_exist - pools_seen
unused = pools_exist - pools_seen - pools_seen_as_fallback
if unused:
unused = '", "'.join(sorted(unused))
reasons.append('unused pools: "{}"'.format(unused))
@ -720,8 +749,13 @@ class _IpList(object):
@classmethod
def process(cls, values):
# Translating None into '' so that the list will be sortable in python3
return [v if v is not None else '' for v in values]
# Translating None into '' so that the list will be sortable in
# python3, get everything to str first
values = [text_type(v) if v is not None else '' for v in values]
# Now round trip all non-'' through the address type and back to a str
# to normalize the address representation.
return [text_type(cls._address_type(v)) if v != '' else ''
for v in values]
class Ipv4List(_IpList):
@ -743,6 +777,11 @@ class _TargetValue(object):
reasons.append('empty value')
elif not data:
reasons.append('missing value')
# NOTE: FQDN complains if the data it receives isn't a str, it doesn't
# allow unicode... This is likely specific to 2.7
elif not FQDN(str(data), allow_underscores=True).is_valid:
reasons.append('{} value "{}" is not a valid FQDN'
.format(_type, data))
elif not data.endswith('.'):
reasons.append('{} value "{}" missing trailing .'
.format(_type, data))
@ -759,6 +798,10 @@ class CnameValue(_TargetValue):
pass
class DnameValue(_TargetValue):
pass
class ARecord(_DynamicMixin, _GeoMixin, Record):
_type = 'A'
_value_type = Ipv4List
@ -777,6 +820,14 @@ class AliasRecord(_ValueMixin, Record):
_type = 'ALIAS'
_value_type = AliasValue
@classmethod
def validate(cls, name, fqdn, data):
reasons = []
if name != '':
reasons.append('non-root ALIAS not allowed')
reasons.extend(super(AliasRecord, cls).validate(name, fqdn, data))
return reasons
class CaaValue(EqualityTupleMixin):
# https://tools.ietf.org/html/rfc6844#page-5
@ -842,6 +893,200 @@ class CnameRecord(_DynamicMixin, _ValueMixin, Record):
return reasons
class DnameRecord(_DynamicMixin, _ValueMixin, Record):
_type = 'DNAME'
_value_type = DnameValue
class LocValue(EqualityTupleMixin):
# TODO: work out how to do defaults per RFC
@classmethod
def validate(cls, data, _type):
int_keys = [
'lat_degrees',
'lat_minutes',
'long_degrees',
'long_minutes',
]
float_keys = [
'lat_seconds',
'long_seconds',
'altitude',
'size',
'precision_horz',
'precision_vert',
]
direction_keys = [
'lat_direction',
'long_direction',
]
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
for key in int_keys:
try:
int(value[key])
if (
(
key == 'lat_degrees' and
not 0 <= int(value[key]) <= 90
) or (
key == 'long_degrees' and
not 0 <= int(value[key]) <= 180
) or (
key in ['lat_minutes', 'long_minutes'] and
not 0 <= int(value[key]) <= 59
)
):
reasons.append('invalid value for {} "{}"'
.format(key, value[key]))
except KeyError:
reasons.append('missing {}'.format(key))
except ValueError:
reasons.append('invalid {} "{}"'
.format(key, value[key]))
for key in float_keys:
try:
float(value[key])
if (
(
key in ['lat_seconds', 'long_seconds'] and
not 0 <= float(value[key]) <= 59.999
) or (
key == 'altitude' and
not -100000.00 <= float(value[key]) <= 42849672.95
) or (
key in ['size',
'precision_horz',
'precision_vert'] and
not 0 <= float(value[key]) <= 90000000.00
)
):
reasons.append('invalid value for {} "{}"'
.format(key, value[key]))
except KeyError:
reasons.append('missing {}'.format(key))
except ValueError:
reasons.append('invalid {} "{}"'
.format(key, value[key]))
for key in direction_keys:
try:
str(value[key])
if (
key == 'lat_direction' and
value[key] not in ['N', 'S']
):
reasons.append('invalid direction for {} "{}"'
.format(key, value[key]))
if (
key == 'long_direction' and
value[key] not in ['E', 'W']
):
reasons.append('invalid direction for {} "{}"'
.format(key, value[key]))
except KeyError:
reasons.append('missing {}'.format(key))
return reasons
@classmethod
def process(cls, values):
return [LocValue(v) for v in values]
def __init__(self, value):
self.lat_degrees = int(value['lat_degrees'])
self.lat_minutes = int(value['lat_minutes'])
self.lat_seconds = float(value['lat_seconds'])
self.lat_direction = value['lat_direction'].upper()
self.long_degrees = int(value['long_degrees'])
self.long_minutes = int(value['long_minutes'])
self.long_seconds = float(value['long_seconds'])
self.long_direction = value['long_direction'].upper()
self.altitude = float(value['altitude'])
self.size = float(value['size'])
self.precision_horz = float(value['precision_horz'])
self.precision_vert = float(value['precision_vert'])
@property
def data(self):
return {
'lat_degrees': self.lat_degrees,
'lat_minutes': self.lat_minutes,
'lat_seconds': self.lat_seconds,
'lat_direction': self.lat_direction,
'long_degrees': self.long_degrees,
'long_minutes': self.long_minutes,
'long_seconds': self.long_seconds,
'long_direction': self.long_direction,
'altitude': self.altitude,
'size': self.size,
'precision_horz': self.precision_horz,
'precision_vert': self.precision_vert,
}
def __hash__(self):
return hash((
self.lat_degrees,
self.lat_minutes,
self.lat_seconds,
self.lat_direction,
self.long_degrees,
self.long_minutes,
self.long_seconds,
self.long_direction,
self.altitude,
self.size,
self.precision_horz,
self.precision_vert,
))
def _equality_tuple(self):
return (
self.lat_degrees,
self.lat_minutes,
self.lat_seconds,
self.lat_direction,
self.long_degrees,
self.long_minutes,
self.long_seconds,
self.long_direction,
self.altitude,
self.size,
self.precision_horz,
self.precision_vert,
)
def __repr__(self):
loc_format = "'{0} {1} {2:.3f} {3} " + \
"{4} {5} {6:.3f} {7} " + \
"{8:.2f}m {9:.2f}m {10:.2f}m {11:.2f}m'"
return loc_format.format(
self.lat_degrees,
self.lat_minutes,
self.lat_seconds,
self.lat_direction,
self.long_degrees,
self.long_minutes,
self.long_seconds,
self.long_direction,
self.altitude,
self.size,
self.precision_horz,
self.precision_vert,
)
class LocRecord(_ValuesMixin, Record):
_type = 'LOC'
_value_type = LocValue
class MxValue(EqualityTupleMixin):
@classmethod
@ -1016,13 +1261,37 @@ class NsRecord(_ValuesMixin, Record):
class PtrValue(_TargetValue):
pass
@classmethod
def validate(cls, values, _type):
if not isinstance(values, list):
values = [values]
reasons = []
if not values:
reasons.append('missing values')
for value in values:
reasons.extend(super(PtrValue, cls).validate(value, _type))
class PtrRecord(_ValueMixin, Record):
return reasons
@classmethod
def process(cls, values):
return [super(PtrValue, cls).process(v) for v in values]
class PtrRecord(_ValuesMixin, Record):
_type = 'PTR'
_value_type = PtrValue
# This is for backward compatibility with providers that don't support
# multi-value PTR records.
@property
def value(self):
return self.values[0]
class SshfpValue(EqualityTupleMixin):
VALID_ALGORITHMS = (1, 2, 3, 4)
@ -1227,3 +1496,86 @@ class _TxtValue(_ChunkedValue):
class TxtRecord(_ChunkedValuesMixin, Record):
_type = 'TXT'
_value_type = _TxtValue
class UrlfwdValue(EqualityTupleMixin):
VALID_CODES = (301, 302)
VALID_MASKS = (0, 1, 2)
VALID_QUERY = (0, 1)
@classmethod
def validate(cls, data, _type):
if not isinstance(data, (list, tuple)):
data = (data,)
reasons = []
for value in data:
try:
code = int(value['code'])
if code not in cls.VALID_CODES:
reasons.append('unrecognized return code "{}"'
.format(code))
except KeyError:
reasons.append('missing code')
except ValueError:
reasons.append('invalid return code "{}"'
.format(value['code']))
try:
masking = int(value['masking'])
if masking not in cls.VALID_MASKS:
reasons.append('unrecognized masking setting "{}"'
.format(masking))
except KeyError:
reasons.append('missing masking')
except ValueError:
reasons.append('invalid masking setting "{}"'
.format(value['masking']))
try:
query = int(value['query'])
if query not in cls.VALID_QUERY:
reasons.append('unrecognized query setting "{}"'
.format(query))
except KeyError:
reasons.append('missing query')
except ValueError:
reasons.append('invalid query setting "{}"'
.format(value['query']))
for k in ('path', 'target'):
if k not in value:
reasons.append('missing {}'.format(k))
return reasons
@classmethod
def process(cls, values):
return [UrlfwdValue(v) for v in values]
def __init__(self, value):
self.path = value['path']
self.target = value['target']
self.code = int(value['code'])
self.masking = int(value['masking'])
self.query = int(value['query'])
@property
def data(self):
return {
'path': self.path,
'target': self.target,
'code': self.code,
'masking': self.masking,
'query': self.query,
}
def __hash__(self):
return hash(self.__repr__())
def _equality_tuple(self):
return (self.path, self.target, self.code, self.masking, self.query)
def __repr__(self):
return '"{}" "{}" {} {} {}'.format(self.path, self.target, self.code,
self.masking, self.query)
class UrlfwdRecord(_ValuesMixin, Record):
_type = 'URLFWD'
_value_type = UrlfwdValue

+ 57
- 7
octodns/source/axfr.py View File

@ -26,8 +26,8 @@ class AxfrBaseSource(BaseSource):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'PTR', 'SPF',
'SRV', 'TXT'))
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'LOC', 'MX', 'NS', 'PTR',
'SPF', 'SRV', 'TXT'))
def __init__(self, id):
super(AxfrBaseSource, self).__init__(id)
@ -43,6 +43,48 @@ class AxfrBaseSource(BaseSource):
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
def _data_for_CAA(self, _type, records):
values = []
for record in records:
flags, tag, value = record['value'].split(' ', 2)
values.append({
'flags': flags,
'tag': tag,
'value': value.replace('"', '')
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values
}
def _data_for_LOC(self, _type, records):
values = []
for record in records:
lat_degrees, lat_minutes, lat_seconds, lat_direction, \
long_degrees, long_minutes, long_seconds, long_direction, \
altitude, size, precision_horz, precision_vert = \
record['value'].replace('m', '').split(' ', 11)
values.append({
'lat_degrees': lat_degrees,
'lat_minutes': lat_minutes,
'lat_seconds': lat_seconds,
'lat_direction': lat_direction,
'long_degrees': long_degrees,
'long_minutes': long_minutes,
'long_seconds': long_seconds,
'long_direction': long_direction,
'altitude': altitude,
'size': size,
'precision_horz': precision_horz,
'precision_vert': precision_vert,
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values
}
def _data_for_MX(self, _type, records):
values = []
for record in records:
@ -191,26 +233,34 @@ class ZoneFileSource(AxfrBaseSource):
class: octodns.source.axfr.ZoneFileSource
# The directory holding the zone files
# Filenames should match zone name (eg. example.com.)
# with optional extension specified with file_extension
directory: ./zonefiles
# File extension on zone files
# Appended to zone name to locate file
# (optional, default None)
file_extension: zone
# Should sanity checks of the origin node be done
# (optional, default true)
check_origin: false
'''
def __init__(self, id, directory, check_origin=True):
def __init__(self, id, directory, file_extension='.', check_origin=True):
self.log = logging.getLogger('ZoneFileSource[{}]'.format(id))
self.log.debug('__init__: id=%s, directory=%s, check_origin=%s', id,
directory, check_origin)
self.log.debug('__init__: id=%s, directory=%s, file_extension=%s, '
'check_origin=%s', id,
directory, file_extension, check_origin)
super(ZoneFileSource, self).__init__(id)
self.directory = directory
self.file_extension = file_extension
self.check_origin = check_origin
self._zone_records = {}
def _load_zone_file(self, zone_name):
zone_filename = '{}{}'.format(zone_name[:-1], self.file_extension)
zonefiles = listdir(self.directory)
if zone_name in zonefiles:
if zone_filename in zonefiles:
try:
z = dns.zone.from_file(join(self.directory, zone_name),
z = dns.zone.from_file(join(self.directory, zone_filename),
zone_name, relativize=False,
check_origin=self.check_origin)
except DNSException as error:


+ 2
- 0
octodns/source/base.py View File

@ -8,6 +8,8 @@ from __future__ import absolute_import, division, print_function, \
class BaseSource(object):
SUPPORTS_MUTLIVALUE_PTR = False
def __init__(self, id):
self.id = id
if not getattr(self, 'log', False):


+ 53
- 2
octodns/zone.py View File

@ -49,16 +49,26 @@ class Zone(object):
# optional trailing . b/c some sources don't have it on their fqdn
self._name_re = re.compile(r'\.?{}?$'.format(name))
# Copy-on-write semantics support, when `not None` this property will
# point to a location with records for this `Zone`. Once `hydrated`
# this property will be set to None
self._origin = None
self.log.debug('__init__: zone=%s, sub_zones=%s', self, sub_zones)
@property
def records(self):
if self._origin:
return self._origin.records
return set([r for _, node in self._records.items() for r in node])
def hostname_from_fqdn(self, fqdn):
return self._name_re.sub('', fqdn)
def add_record(self, record, replace=False, lenient=False):
if self._origin:
self.hydrate()
name = record.name
last = name.split('.')[-1]
@ -94,10 +104,14 @@ class Zone(object):
node.add(record)
def _remove_record(self, record):
'Only for use in tests'
def remove_record(self, record):
if self._origin:
self.hydrate()
self._records[record.name].discard(record)
# TODO: delete this
_remove_record = remove_record
def changes(self, desired, target):
self.log.debug('changes: zone=%s, target=%s', self, target)
@ -184,5 +198,42 @@ class Zone(object):
return changes
def hydrate(self):
'''
Take a shallow copy Zone and make it a deeper copy holding its own
reference to records. These records will still be the originals and
they should not be modified. Changes should be made by calling
`add_record`, often with `replace=True`, and/or `remove_record`.
Note: This method does not need to be called under normal circumstances
as `add_record` and `remove_record` will automatically call it when
appropriate.
'''
origin = self._origin
if origin is None:
return False
# Need to clear this before the copy to prevent recursion
self._origin = None
for record in origin.records:
# Use lenient as we're copying origin and should take its records
# regardless
self.add_record(record, lenient=True)
return True
def copy(self):
'''
Copy-on-write semantics support. This method will create a shallow
clone of the zone which will be hydrated the first time `add_record` or
`remove_record` is called.
This allows low-cost copies of things to be made in situations where
changes are unlikely and only incurs the "expense" of actually
copying the records when required. The actual record copy will not be
"deep" meaning that records should not be modified directly.
'''
copy = Zone(self.name, self.sub_zones)
copy._origin = self
return copy
def __repr__(self):
return 'Zone<{}>'.format(self.name)

+ 1
- 1
requirements-dev.txt View File

@ -5,4 +5,4 @@ pycodestyle==2.6.0
pyflakes==2.2.0
readme_renderer[md]==26.0
requests_mock
twine==1.15.0
twine==3.2.0; python_version >= '3.2'

+ 6
- 3
requirements.txt View File

@ -1,12 +1,15 @@
PyYaml==5.3.1
azure-common==1.1.25
azure-mgmt-dns==3.0.0
PyYaml==5.4
azure-common==1.1.27
azure-identity==1.5.0
azure-mgmt-dns==8.0.0
azure-mgmt-trafficmanager==0.51.0
boto3==1.15.9
botocore==1.18.9
dnspython==1.16.0
docutils==0.16
dyn==1.8.1
edgegrid-python==1.1.1
fqdn==1.5.0
futures==3.2.0; python_version < '3.2'
google-cloud-core==1.4.1
google-cloud-dns==0.32.0


+ 5
- 1
script/coverage View File

@ -25,9 +25,13 @@ export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS=
export ARM_CLIENT_ID=
export ARM_CLIENT_SECRET=
export ARM_TENANT_ID=
export ARM_SUBSCRIPTION_ID=
# Don't allow disabling coverage
grep -r -I --line-number "# pragma: nocover" octodns && {
grep -r -I --line-number "# pragma: +no.*cover" octodns && {
echo "Code coverage should not be disabled"
exit 1
}


+ 4
- 0
script/test View File

@ -25,5 +25,9 @@ export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS=
export ARM_CLIENT_ID=
export ARM_CLIENT_SECRET=
export ARM_TENANT_ID=
export ARM_SUBSCRIPTION_ID=
nosetests "$@"

+ 2
- 1
setup.py View File

@ -69,6 +69,7 @@ setup(
'PyYaml>=4.2b1',
'dnspython>=1.15.0',
'futures>=3.2.0; python_version<"3.2"',
'fqdn>=1.5.0',
'ipaddress>=1.0.22; python_version<"3.3"',
'natsort>=5.5.0',
'pycountry>=19.8.18',
@ -81,6 +82,6 @@ setup(
long_description_content_type='text/markdown',
name='octodns',
packages=find_packages(),
url='https://github.com/github/octodns',
url='https://github.com/octodns/octodns',
version=octodns.__VERSION__,
)

+ 21
- 0
tests/config/alias-zone-loop.yaml View File

@ -0,0 +1,21 @@
manager:
max_workers: 2
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
unit.tests.:
sources:
- in
targets:
- dump
alias.tests.:
alias: unit.tests.
alias-loop.tests.:
alias: alias.tests.

+ 23
- 0
tests/config/dynamic.tests.yaml View File

@ -109,6 +109,29 @@ cname:
- pool: iad
type: CNAME
value: target.unit.tests.
pool-only-in-fallback:
dynamic:
pools:
one:
fallback: two
values:
- value: 1.1.1.1
three:
values:
- value: 3.3.3.3
two:
values:
- value: 2.2.2.2
rules:
- geos:
- NA-US
pool: one
- geos:
- AS-SG
pool: three
ttl: 300
type: A
values: [4.4.4.4]
real-ish-a:
dynamic:
pools:


+ 6
- 0
tests/config/plan-output-filehandle.yaml View File

@ -0,0 +1,6 @@
manager:
plan_outputs:
"doesntexist":
class: octodns.provider.plan.DoesntExist
providers: {}
zones: {}

+ 23
- 0
tests/config/processors-missing-class.yaml View File

@ -0,0 +1,23 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
no-class: {}
zones:
unit.tests.:
processors:
- noop
sources:
- in
targets:
- dump

+ 25
- 0
tests/config/processors-wants-config.yaml View File

@ -0,0 +1,25 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
# valid class, but it wants a param and we're not passing it
wants-config:
class: helpers.WantsConfigProcessor
zones:
unit.tests.:
processors:
- noop
sources:
- in
targets:
- dump

+ 33
- 0
tests/config/processors.yaml View File

@ -0,0 +1,33 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
# Just testing config so any processor will do
noop:
class: octodns.processor.base.BaseProcessor
zones:
unit.tests.:
processors:
- noop
sources:
- config
targets:
- dump
bad.unit.tests.:
processors:
- doesnt-exist
sources:
- in
targets:
- dump

+ 19
- 0
tests/config/simple-alias-zone.yaml View File

@ -0,0 +1,19 @@
manager:
max_workers: 2
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
unit.tests.:
sources:
- in
targets:
- dump
alias.tests.:
alias: unit.tests.

+ 3
- 0
tests/config/simple-split.yaml View File

@ -4,14 +4,17 @@ providers:
in:
class: octodns.provider.yaml.SplitYamlProvider
directory: tests/config/split
extension: .tst
dump:
class: octodns.provider.yaml.SplitYamlProvider
directory: env/YAML_TMP_DIR
extension: .tst
# This is sort of ugly, but it shouldn't hurt anything. It'll just write out
# the target file twice where it and dump are both used
dump2:
class: octodns.provider.yaml.SplitYamlProvider
directory: env/YAML_TMP_DIR
extension: .tst
simple:
class: helpers.SimpleProvider
geo:


tests/config/split/dynamic.tests./a.yaml → tests/config/split/dynamic.tests.tst/a.yaml View File


tests/config/split/dynamic.tests./aaaa.yaml → tests/config/split/dynamic.tests.tst/aaaa.yaml View File


tests/config/split/dynamic.tests./cname.yaml → tests/config/split/dynamic.tests.tst/cname.yaml View File


tests/config/split/dynamic.tests./real-ish-a.yaml → tests/config/split/dynamic.tests.tst/real-ish-a.yaml View File


tests/config/split/dynamic.tests./simple-weighted.yaml → tests/config/split/dynamic.tests.tst/simple-weighted.yaml View File


tests/config/split/empty./.gitkeep → tests/config/split/empty.tst/.gitkeep View File


tests/config/split/subzone.unit.tests./12.yaml → tests/config/split/subzone.unit.tests.tst/12.yaml View File


tests/config/split/subzone.unit.tests./2.yaml → tests/config/split/subzone.unit.tests.tst/2.yaml View File


tests/config/split/subzone.unit.tests./test.yaml → tests/config/split/subzone.unit.tests.tst/test.yaml View File


tests/config/split/unit.tests./$unit.tests.yaml → tests/config/split/unit.tests.tst/$unit.tests.yaml View File


tests/config/split/unit.tests./_srv._tcp.yaml → tests/config/split/unit.tests.tst/_srv._tcp.yaml View File


tests/config/split/unit.tests./aaaa.yaml → tests/config/split/unit.tests.tst/aaaa.yaml View File


tests/config/split/unit.tests./cname.yaml → tests/config/split/unit.tests.tst/cname.yaml View File


+ 5
- 0
tests/config/split/unit.tests.tst/dname.yaml View File

@ -0,0 +1,5 @@
---
dname:
ttl: 300
type: DNAME
value: unit.tests.

tests/config/split/unit.tests./excluded.yaml → tests/config/split/unit.tests.tst/excluded.yaml View File


tests/config/split/unit.tests./ignored.yaml → tests/config/split/unit.tests.tst/ignored.yaml View File


tests/config/split/unit.tests./included.yaml → tests/config/split/unit.tests.tst/included.yaml View File


tests/config/split/unit.tests./mx.yaml → tests/config/split/unit.tests.tst/mx.yaml View File


tests/config/split/unit.tests./naptr.yaml → tests/config/split/unit.tests.tst/naptr.yaml View File


tests/config/split/unit.tests./ptr.yaml → tests/config/split/unit.tests.tst/ptr.yaml View File


tests/config/split/unit.tests./spf.yaml → tests/config/split/unit.tests.tst/spf.yaml View File


tests/config/split/unit.tests./sub.yaml → tests/config/split/unit.tests.tst/sub.yaml View File


tests/config/split/unit.tests./txt.yaml → tests/config/split/unit.tests.tst/txt.yaml View File


+ 15
- 0
tests/config/split/unit.tests.tst/urlfwd.yaml View File

@ -0,0 +1,15 @@
---
urlfwd:
ttl: 300
type: URLFWD
values:
- code: 302
masking: 2
path: '/'
query: 0
target: 'http://www.unit.tests'
- code: 301
masking: 2
path: '/target'
query: 0
target: 'http://target.unit.tests'

tests/config/split/unit.tests./www.sub.yaml → tests/config/split/unit.tests.tst/www.sub.yaml View File


tests/config/split/unit.tests./www.yaml → tests/config/split/unit.tests.tst/www.yaml View File


tests/config/split/unordered./abc.yaml → tests/config/split/unordered.tst/abc.yaml View File


tests/config/split/unordered./xyz.yaml → tests/config/split/unordered.tst/xyz.yaml View File


+ 63
- 1
tests/config/unit.tests.yaml View File

@ -36,6 +36,22 @@
- flags: 0
tag: issue
value: ca.unit.tests
_imap._tcp:
ttl: 600
type: SRV
values:
- port: 0
priority: 0
target: .
weight: 0
_pop3._tcp:
ttl: 600
type: SRV
values:
- port: 0
priority: 0
target: .
weight: 0
_srv._tcp:
ttl: 600
type: SRV
@ -56,6 +72,10 @@ cname:
ttl: 300
type: CNAME
value: unit.tests.
dname:
ttl: 300
type: DNAME
value: unit.tests.
excluded:
octodns:
excluded:
@ -73,6 +93,34 @@ included:
- test
type: CNAME
value: unit.tests.
loc:
ttl: 300
type: LOC
values:
- altitude: 20
lat_degrees: 31
lat_direction: S
lat_minutes: 58
lat_seconds: 52.1
long_degrees: 115
long_direction: E
long_minutes: 49
long_seconds: 11.7
precision_horz: 10
precision_vert: 2
size: 10
- altitude: 20
lat_degrees: 53
lat_direction: N
lat_minutes: 13
lat_seconds: 10
long_degrees: 2
long_direction: W
long_minutes: 18
long_seconds: 26
precision_horz: 1000
precision_vert: 2
size: 10
mx:
ttl: 300
type: MX
@ -104,7 +152,7 @@ naptr:
ptr:
ttl: 300
type: PTR
value: foo.bar.com.
values: [foo.bar.com.]
spf:
ttl: 600
type: SPF
@ -121,6 +169,20 @@ txt:
- Bah bah black sheep
- have you any wool.
- 'v=DKIM1\;k=rsa\;s=email\;h=sha256\;p=A/kinda+of/long/string+with+numb3rs'
urlfwd:
ttl: 300
type: URLFWD
values:
- code: 302
masking: 2
path: '/'
query: 0
target: 'http://www.unit.tests'
- code: 301
masking: 2
path: '/target'
query: 0
target: 'http://target.unit.tests'
www:
ttl: 300
type: A


+ 17
- 0
tests/config/unknown-processor.yaml View File

@ -0,0 +1,17 @@
manager:
max_workers: 2
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
unit.tests.:
sources:
- in
processors:
- missing
targets:
- dump

+ 18
- 0
tests/config/unknown-source-zone.yaml View File

@ -0,0 +1,18 @@
manager:
max_workers: 2
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
unit.tests.:
sources:
- in
targets:
- dump
alias.tests.:
alias: does-not-exists.tests.

+ 16
- 16
tests/fixtures/cloudflare-dns_records-page-2.json View File

@ -177,15 +177,15 @@
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
"name": "_srv._tcp.unit.tests",
"name": "_imap._tcp.unit.tests",
"data": {
"service": "_srv",
"service": "_imap",
"proto": "_tcp",
"name": "unit.tests",
"priority": 12,
"weight": 20,
"port": 30,
"target": "foo-2.unit.tests"
"priority": 0,
"weight": 0,
"port": 0,
"target": "."
},
"proxiable": true,
"proxied": false,
@ -202,15 +202,15 @@
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
"name": "_srv._tcp.unit.tests",
"name": "_pop3._tcp.unit.tests",
"data": {
"service": "_srv",
"proto": "_tcp",
"service": "_imap",
"proto": "_pop3",
"name": "unit.tests",
"priority": 10,
"weight": 20,
"port": 30,
"target": "foo-1.unit.tests"
"priority": 0,
"weight": 0,
"port": 0,
"target": "."
},
"proxiable": true,
"proxied": false,
@ -227,10 +227,10 @@
],
"result_info": {
"page": 2,
"per_page": 11,
"total_pages": 2,
"per_page": 10,
"total_pages": 3,
"count": 10,
"total_count": 20
"total_count": 24
},
"success": true,
"errors": [],


+ 128
- 0
tests/fixtures/cloudflare-dns_records-page-3.json View File

@ -0,0 +1,128 @@
{
"result": [
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
"name": "_srv._tcp.unit.tests",
"data": {
"service": "_srv",
"proto": "_tcp",
"name": "unit.tests",
"priority": 12,
"weight": 20,
"port": 30,
"target": "foo-2.unit.tests"
},
"proxiable": true,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.940682Z",
"created_on": "2017-03-11T18:01:43.940682Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
"name": "_srv._tcp.unit.tests",
"data": {
"service": "_srv",
"proto": "_tcp",
"name": "unit.tests",
"priority": 10,
"weight": 20,
"port": 30,
"target": "foo-1.unit.tests"
},
"proxiable": true,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.940682Z",
"created_on": "2017-03-11T18:01:43.940682Z",
"meta": {
"auto_added": false
}
},
{
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "LOC",
"name": "loc.unit.tests",
"content": "IN LOC 31 58 52.1 S 115 49 11.7 E 20m 10m 10m 2m",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"created_on": "2020-01-28T05:20:00.12345Z",
"modified_on": "2020-01-28T05:20:00.12345Z",
"data": {
"lat_degrees": 31,
"lat_minutes": 58,
"lat_seconds": 52.1,
"lat_direction": "S",
"long_degrees": 115,
"long_minutes": 49,
"long_seconds": 11.7,
"long_direction": "E",
"altitude": 20,
"size": 10,
"precision_horz": 10,
"precision_vert": 2
},
"meta": {
"auto_added": true,
"source": "primary"
}
},
{
"id": "372e67954025e0ba6aaa6d586b9e0b59",
"type": "LOC",
"name": "loc.unit.tests",
"content": "IN LOC 53 14 10 N 2 18 26 W 20m 10m 1000m 2m",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"created_on": "2020-01-28T05:20:00.12345Z",
"modified_on": "2020-01-28T05:20:00.12345Z",
"data": {
"lat_degrees": 53,
"lat_minutes": 13,
"lat_seconds": 10,
"lat_direction": "N",
"long_degrees": 2,
"long_minutes": 18,
"long_seconds": 26,
"long_direction": "W",
"altitude": 20,
"size": 10,
"precision_horz": 1000,
"precision_vert": 2
},
"meta": {
"auto_added": true,
"source": "primary"
}
}
],
"result_info": {
"page": 3,
"per_page": 10,
"total_pages": 3,
"count": 4,
"total_count": 24
},
"success": true,
"errors": [],
"messages": []
}

+ 103
- 0
tests/fixtures/cloudflare-pagerules.json View File

@ -0,0 +1,103 @@
{
"result": [
{
"id": "2b1ec1793185213139f22059a165376e",
"targets": [
{
"target": "url",
"constraint": {
"operator": "matches",
"value": "urlfwd0.unit.tests/"
}
}
],
"actions": [
{
"id": "always_use_https"
}
],
"priority": 4,
"status": "active",
"created_on": "2021-06-29T17:14:28.000000Z",
"modified_on": "2021-06-29T17:15:33.000000Z"
},
{
"id": "2b1ec1793185213139f22059a165376f",
"targets": [
{
"target": "url",
"constraint": {
"operator": "matches",
"value": "urlfwd0.unit.tests/*"
}
}
],
"actions": [
{
"id": "forwarding_url",
"value": {
"url": "https://www.unit.tests/",
"status_code": 301
}
}
],
"priority": 3,
"status": "active",
"created_on": "2021-06-29T17:07:12.000000Z",
"modified_on": "2021-06-29T17:15:12.000000Z"
},
{
"id": "2b1ec1793185213139f22059a165377e",
"targets": [
{
"target": "url",
"constraint": {
"operator": "matches",
"value": "urlfwd1.unit.tests/*"
}
}
],
"actions": [
{
"id": "forwarding_url",
"value": {
"url": "https://www.unit.tests/",
"status_code": 302
}
}
],
"priority": 2,
"status": "active",
"created_on": "2021-06-28T22:42:27.000000Z",
"modified_on": "2021-06-28T22:43:13.000000Z"
},
{
"id": "2a9140b17ffb0e6aed826049eec970b8",
"targets": [
{
"target": "url",
"constraint": {
"operator": "matches",
"value": "urlfwd2.unit.tests/*"
}
}
],
"actions": [
{
"id": "forwarding_url",
"value": {
"url": "https://www.unit.tests/",
"status_code": 301
}
}
],
"priority": 1,
"status": "active",
"created_on": "2021-06-25T20:10:50.000000Z",
"modified_on": "2021-06-28T22:38:10.000000Z"
}
],
"success": true,
"errors": [],
"messages": []
}

+ 56
- 37
tests/fixtures/constellix-records.json View File

@ -64,6 +64,62 @@
"roundRobinFailover": [],
"pools": [],
"poolsDetail": []
}, {
"id": 1898527,
"type": "SRV",
"recordType": "srv",
"name": "_imap._tcp",
"recordOption": "roundRobin",
"noAnswer": false,
"note": "",
"ttl": 600,
"gtdRegion": 1,
"parentId": 123123,
"parent": "domain",
"source": "Domain",
"modifiedTs": 1565149714387,
"value": [{
"value": ".",
"priority": 0,
"weight": 0,
"port": 0,
"disableFlag": false
}],
"roundRobin": [{
"value": ".",
"priority": 0,
"weight": 0,
"port": 0,
"disableFlag": false
}]
}, {
"id": 1898528,
"type": "SRV",
"recordType": "srv",
"name": "_pop3._tcp",
"recordOption": "roundRobin",
"noAnswer": false,
"note": "",
"ttl": 600,
"gtdRegion": 1,
"parentId": 123123,
"parent": "domain",
"source": "Domain",
"modifiedTs": 1565149714387,
"value": [{
"value": ".",
"priority": 0,
"weight": 0,
"port": 0,
"disableFlag": false
}],
"roundRobin": [{
"value": ".",
"priority": 0,
"weight": 0,
"port": 0,
"disableFlag": false
}]
}, {
"id": 1808527,
"type": "SRV",
@ -523,43 +579,6 @@
"roundRobinFailover": [],
"pools": [],
"poolsDetail": []
}, {
"id": 1808603,
"type": "ANAME",
"recordType": "aname",
"name": "sub",
"recordOption": "roundRobin",
"noAnswer": false,
"note": "",
"ttl": 1800,
"gtdRegion": 1,
"parentId": 123123,
"parent": "domain",
"source": "Domain",
"modifiedTs": 1565153387855,
"value": [{
"value": "aname.unit.tests.",
"disableFlag": false
}],
"roundRobin": [{
"value": "aname.unit.tests.",
"disableFlag": false
}],
"geolocation": null,
"recordFailover": {
"disabled": false,
"failoverType": 1,
"failoverTypeStr": "Normal (always lowest level)",
"values": []
},
"failover": {
"disabled": false,
"failoverType": 1,
"failoverTypeStr": "Normal (always lowest level)",
"values": []
},
"pools": [],
"poolsDetail": []
}, {
"id": 1808520,
"type": "A",


+ 22
- 0
tests/fixtures/digitalocean-page-2.json View File

@ -76,6 +76,28 @@
"weight": null,
"flags": null,
"tag": null
}, {
"id": 11189896,
"type": "SRV",
"name": "_imap._tcp",
"data": ".",
"priority": 0,
"port": 0,
"ttl": 600,
"weight": 0,
"flags": null,
"tag": null
}, {
"id": 11189897,
"type": "SRV",
"name": "_pop3._tcp",
"data": ".",
"priority": 0,
"port": 0,
"ttl": 600,
"weight": 0,
"flags": null,
"tag": null
}],
"links": {
"pages": {


+ 0
- 14
tests/fixtures/dnsmadeeasy-records.json View File

@ -320,20 +320,6 @@
"name": "",
"value": "aname.unit.tests.",
"id": 11189895,
"type": "ANAME"
}, {
"failover": false,
"monitor": false,
"sourceId": 123123,
"dynamicDns": false,
"failed": false,
"gtdLocation": "DEFAULT",
"hardLink": false,
"ttl": 1800,
"source": 1,
"name": "sub",
"value": "aname",
"id": 11189896,
"type": "ANAME"
}, {
"failover": false,


+ 24
- 2
tests/fixtures/easydns-records.json View File

@ -264,10 +264,32 @@
"rdata": "v=DKIM1;k=rsa;s=email;h=sha256;p=A\/kinda+of\/long\/string+with+numb3rs",
"geozone_id": "0",
"last_mod": "2020-01-01 01:01:01"
},
{
"id": "12340025",
"domain": "unit.tests",
"host": "_imap._tcp",
"ttl": "600",
"prio": "0",
"type": "SRV",
"rdata": "0 0 0 .",
"geozone_id": "0",
"last_mod": "2020-01-01 01:01:01"
},
{
"id": "12340026",
"domain": "unit.tests",
"host": "_pop3._tcp",
"ttl": "600",
"prio": "0",
"type": "SRV",
"rdata": "0 0 0 .",
"geozone_id": "0",
"last_mod": "2020-01-01 01:01:01"
}
],
"count": 24,
"total": 24,
"count": 26,
"total": 26,
"start": 0,
"max": 1000,
"status": 200


+ 18
- 2
tests/fixtures/edgedns-records.json View File

@ -9,6 +9,22 @@
"name": "_srv._tcp.unit.tests",
"ttl": 600
},
{
"rdata": [
"0 0 0 ."
],
"type": "SRV",
"name": "_imap._tcp.unit.tests",
"ttl": 600
},
{
"rdata": [
"0 0 0 ."
],
"type": "SRV",
"name": "_pop3._tcp.unit.tests",
"ttl": 600
},
{
"rdata": [
"2601:644:500:e210:62f8:1dff:feb8:947a"
@ -151,7 +167,7 @@
}
],
"metadata": {
"totalElements": 16,
"totalElements": 18,
"showAll": true
}
}
}

+ 154
- 0
tests/fixtures/gandi-no-changes.json View File

@ -0,0 +1,154 @@
[
{
"rrset_type": "A",
"rrset_ttl": 300,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/A",
"rrset_values": [
"1.2.3.4",
"1.2.3.5"
]
},
{
"rrset_type": "CAA",
"rrset_ttl": 3600,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/CAA",
"rrset_values": [
"0 issue \"ca.unit.tests\""
]
},
{
"rrset_type": "SSHFP",
"rrset_ttl": 3600,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/SSHFP",
"rrset_values": [
"1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
"1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73"
]
},
{
"rrset_type": "AAAA",
"rrset_ttl": 600,
"rrset_name": "aaaa",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/aaaa/AAAA",
"rrset_values": [
"2601:644:500:e210:62f8:1dff:feb8:947a"
]
},
{
"rrset_type": "CNAME",
"rrset_ttl": 300,
"rrset_name": "cname",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/cname/CNAME",
"rrset_values": [
"unit.tests."
]
},
{
"rrset_type": "DNAME",
"rrset_ttl": 300,
"rrset_name": "dname",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/dname/DNAME",
"rrset_values": [
"unit.tests."
]
},
{
"rrset_type": "CNAME",
"rrset_ttl": 3600,
"rrset_name": "excluded",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/excluded/CNAME",
"rrset_values": [
"unit.tests."
]
},
{
"rrset_type": "MX",
"rrset_ttl": 300,
"rrset_name": "mx",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/mx/MX",
"rrset_values": [
"10 smtp-4.unit.tests.",
"20 smtp-2.unit.tests.",
"30 smtp-3.unit.tests.",
"40 smtp-1.unit.tests."
]
},
{
"rrset_type": "PTR",
"rrset_ttl": 300,
"rrset_name": "ptr",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/ptr/PTR",
"rrset_values": [
"foo.bar.com."
]
},
{
"rrset_type": "SPF",
"rrset_ttl": 600,
"rrset_name": "spf",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/spf/SPF",
"rrset_values": [
"\"v=spf1 ip4:192.168.0.1/16-all\""
]
},
{
"rrset_type": "TXT",
"rrset_ttl": 600,
"rrset_name": "txt",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/txt/TXT",
"rrset_values": [
"\"Bah bah black sheep\"",
"\"have you any wool.\"",
"\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
]
},
{
"rrset_type": "A",
"rrset_ttl": 300,
"rrset_name": "www",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www/A",
"rrset_values": [
"2.2.3.6"
]
},
{
"rrset_type": "A",
"rrset_ttl": 300,
"rrset_name": "www.sub",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www.sub/A",
"rrset_values": [
"2.2.3.6"
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 600,
"rrset_name": "_imap._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imap._tcp/SRV",
"rrset_values": [
"0 0 0 ."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 600,
"rrset_name": "_pop3._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3._tcp/SRV",
"rrset_values": [
"0 0 0 ."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 600,
"rrset_name": "_srv._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_srv._tcp/SRV",
"rrset_values": [
"10 20 30 foo-1.unit.tests.",
"12 20 30 foo-2.unit.tests."
]
}
]

+ 111
- 0
tests/fixtures/gandi-records.json View File

@ -0,0 +1,111 @@
[
{
"rrset_type": "A",
"rrset_ttl": 10800,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/A",
"rrset_values": [
"217.70.184.38"
]
},
{
"rrset_type": "MX",
"rrset_ttl": 10800,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/MX",
"rrset_values": [
"10 spool.mail.gandi.net.",
"50 fb.mail.gandi.net."
]
},
{
"rrset_type": "TXT",
"rrset_ttl": 10800,
"rrset_name": "@",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/TXT",
"rrset_values": [
"\"v=spf1 include:_mailcust.gandi.net ?all\""
]
},
{
"rrset_type": "CNAME",
"rrset_ttl": 10800,
"rrset_name": "webmail",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/webmail/CNAME",
"rrset_values": [
"webmail.gandi.net."
]
},
{
"rrset_type": "CNAME",
"rrset_ttl": 10800,
"rrset_name": "www",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www/CNAME",
"rrset_values": [
"webredir.vip.gandi.net."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 10800,
"rrset_name": "_imap._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imap._tcp/SRV",
"rrset_values": [
"0 0 0 ."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 10800,
"rrset_name": "_imaps._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imaps._tcp/SRV",
"rrset_values": [
"0 1 993 mail.gandi.net."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 10800,
"rrset_name": "_pop3._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3._tcp/SRV",
"rrset_values": [
"0 0 0 ."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 10800,
"rrset_name": "_pop3s._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3s._tcp/SRV",
"rrset_values": [
"10 1 995 mail.gandi.net."
]
},
{
"rrset_type": "SRV",
"rrset_ttl": 10800,
"rrset_name": "_submission._tcp",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_submission._tcp/SRV",
"rrset_values": [
"0 1 465 mail.gandi.net."
]
},
{
"rrset_type": "CDS",
"rrset_ttl": 10800,
"rrset_name": "sub",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/sub/CDS",
"rrset_values": [
"32128 13 1 6823D9BB1B03DF714DD0EB163E20B341C96D18C0"
]
},
{
"rrset_type": "CNAME",
"rrset_ttl": 10800,
"rrset_name": "relative",
"rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/relative/CNAME",
"rrset_values": [
"target"
]
}
]

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save