diff --git a/.git_hooks_pre-commit b/.git_hooks_pre-commit
index 6b3b02e..1fb621f 100755
--- a/.git_hooks_pre-commit
+++ b/.git_hooks_pre-commit
@@ -2,10 +2,10 @@
set -e
-HOOKS=`dirname $0`
-GIT=`dirname $HOOKS`
-ROOT=`dirname $GIT`
+HOOKS=$(dirname "$0")
+GIT=$(dirname "$HOOKS")
+ROOT=$(dirname "$GIT")
-. $ROOT/env/bin/activate
-$ROOT/script/lint
-$ROOT/script/coverage
+. "$ROOT/env/bin/activate"
+"$ROOT/script/lint"
+"$ROOT/script/coverage"
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..047c9ed
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+# These are supported funding model platforms
+
+github: ross
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 726e2e8..b68df8d 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -6,7 +6,8 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [2.7, 3.7]
+ # Tested versions based on dates in https://devguide.python.org/devcycle/#end-of-life-branches,
+ python-version: [3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@master
- name: Setup python
diff --git a/.gitignore b/.gitignore
index 715b687..5192821 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,8 @@
*.pyc
.coverage
.env
-/config/
/build/
+/config/
coverage.xml
dist/
env/
@@ -14,4 +14,5 @@ htmlcov/
nosetests.xml
octodns.egg-info/
output/
+tests/zones/unit.tests.
tmp/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a0a92a9..caf5e00 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,104 @@
-## v0.9.11 - 2020-??-?? - ???????????????
-
-* Added support for TCP health checking to dynamic records
+## v0.9.14 - 2021-??-?? - A new supports system
+
+#### Noteworthy changes
+
+* Provider `strict_supports` param added, currently defaults to `false`, along
+ with Provider._process_desired_zone this forms the foundations of a new
+ "supports" system where providers will warn or error (depending on the value
+ of `strict_supports`) during planning about their inability to do what
+ they're being asked. When `false` they will warn and "adjust" the desired
+ records. When true they will abort with an error indicating the problem. Over
+ time it is expected that all "supports" checking/handling will move into this
+ paradigm and `strict_supports` will likely be changed to default to `true`.
+* Zone shallow copy support, reworking of Processors (alpha) semantics
+* NS1 NA target now includes `SX` and `UM`. If `NA` continent is in use in
+ dynamic records care must be taken to upgrade/downgrade to v0.9.13.
+* Ns1Provider now supports a new parameter, shared_notifylist, which results in
+ all dynamic record monitors using a shared notify list named 'octoDNS NS1
+ Notify List'. Only newly created record values will use the shared notify
+ list. It should be safe to enable this functionality, but existing records
+ will not be converted. Note: Once this option is enabled downgrades to
+ previous versions of octoDNS are discouraged and may result in undefined
+ behavior and broken records. See https://github.com/octodns/octodns/pull/749
+ for related discussion.
+
+## v0.9.13 - 2021-07-18 - Processors Alpha
+
+#### Noteworthy changes
+
+* Alpha support for Processors has been added. Processors allow for hooking
+   into the source, target, and planning process to make nearly arbitrary changes
+ to data. See the [octodns/processor/](/octodns/processor) directory for
+ examples. The change has been designed to have no impact on the process
+ unless the `processors` key is present in zone configs.
+* Fixes NS1 provider's geotarget limitation of using `NA` continent. Now, when
+ `NA` is used in geos it considers **all** the countries of `North America`
+  instead of just `us-east`, `us-west` and `us-central` regions
+* `SX` & `UM` country support added to NS1Provider, not yet in the North
+ America list for backwards compatibility reasons. They will be added in the
+  next release.
+
+#### Stuff
+
+* Lots of progress on the partial/beta support for dynamic records in Azure,
+ still not production ready.
+* NS1 fix for when a pool only exists as a fallback
+* Zone level lenient flag
+* Validate weight makes sense for pools with a single record
+* UltraDNS support for aliases and general fixes/improvements
+* Misc doc fixes and improvements
+
+## v0.9.12 - 2021-04-30 - Enough time has passed
+
+#### Noteworthy changes
+
+* Formal Python 2.7 support removed, deps and tooling were becoming
+ unmaintainable
+* octodns/octodns move, from github/octodns, more to come
+
+#### Stuff
+
+* ZoneFileSource supports specifying an extension & no files end in . to better
+ support Windows
+* LOC record type support added
+* Support for pre-release versions of PowerDNS
+* PowerDNS delete before create which allows A <-> CNAME etc.
+* Improved validation of fqdn's in ALIAS, CNAME, etc.
+* Transip support for NS records
+* Support for sending plan output to a file
+* DNSimple uses zone api rather than domain to support non-registered stuff,
+ e.g. reverse zones.
+* Support for fallback-only dynamic pools and related fixes to NS1 provider
+* Initial Hetzner provider
+
+## v0.9.11 - 2020-11-05 - We still don't know edition
+
+#### Noteworthy changes
+
+* ALIAS records only allowed at the root of zones - see `leient` in record docs
+ for work-arounds if you really need them.
+
+#### New Providers
+
+* Gandi LiveDNS
+* UltraDNS
+* easyDNS
+
+#### Stuff
+
+* Add support for zones aliases
+* octodns-compare: Prefix filtering and status code on mismatch
+* Implement octodns-sync --source
+* Adding environment variable record injection
+* Add support for wildcard SRV records, as shown in RFC 2782
+* Add healthcheck option 'request_interval' for Route53 provider
+* NS1 georegion, country, and catchall need to be separate groups
+* Add the ability to mark a zone as lenient
+* Add support for geo-targeting of CA provinces
+* Update geo_data to pick up a couple renames
+* Cloudflare: Add PTR Support, update rate-limit handling and pagination
+* Support PowerDNS 4.3.x
+* Added support for TCP health checking of dynamic records
## v0.9.10 - 2020-04-20 - Dynamic NS1 and lots of misc
@@ -30,7 +128,7 @@
* Explicit ordering of changes by (name, type) to address inconsistent
ordering for a number of providers that just convert changes into API
calls as they come. Python 2 sets ordered consistently, Python 3 they do
- not. https://github.com/github/octodns/pull/384/commits/7958233fccf9ea22d95e2fd06c48d7d0a4529e26
+ not. https://github.com/octodns/octodns/pull/384/commits/7958233fccf9ea22d95e2fd06c48d7d0a4529e26
* Route53 `_mod_keyer` ordering wasn't 100% complete and thus unreliable and
random in Python 3. This has been addressed and may result in value
reordering on next plan, no actual changes in behavior should occur.
@@ -127,10 +225,10 @@ recreating all health checks. This process has been tested pretty thoroughly to
try and ensure a seemless upgrade without any traffic shifting around. It's
probably best to take extra care when updating and to try and make sure that
all health checks are passing before the first sync with `--doit`. See
-[#67](https://github.com/github/octodns/pull/67) for more information.
+[#67](https://github.com/octodns/octodns/pull/67) for more information.
* Major update to geo healthchecks to allow configuring host (header), path,
- protocol, and port [#67](https://github.com/github/octodns/pull/67)
+ protocol, and port [#67](https://github.com/octodns/octodns/pull/67)
* SSHFP algorithm type 4
* NS1 and DNSimple support skipping unsupported record types
* Revert back to old style setup.py & requirements.txt, setup.cfg was
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ea891ac..019caa3 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,7 +4,7 @@ Hi there! We're thrilled that you'd like to contribute to OctoDNS. Your help is
Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms.
-If you have questions, or you'd like to check with us before embarking on a major development effort, please [open an issue](https://github.com/github/octodns/issues/new).
+If you have questions, or you'd like to check with us before embarking on a major development effort, please [open an issue](https://github.com/octodns/octodns/issues/new).
## How to contribute
diff --git a/MANIFEST.in b/MANIFEST.in
index cda90ed..9e3dc38 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,11 @@
-include README.md
+include CHANGELOG.md
+include CODE_OF_CONDUCT.md
include CONTRIBUTING.md
include LICENSE
-include docs/*
-include octodns/*
+include README.md
+include requirements-dev.txt
+include requirements.txt
include script/*
-include tests/*
+recursive-include docs *.png *.md
+recursive-include tests *.json *.py *.txt *.yaml
+recursive-include tests/zones *
diff --git a/README.md b/README.md
index 23ac0e8..28d9e7f 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-
+
## DNS as code - Tools for managing DNS across multiple providers
@@ -28,6 +28,7 @@ It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
- [Dynamic sources](#dynamic-sources)
- [Contributing](#contributing)
- [Getting help](#getting-help)
+- [Related Projects & Resources](#related-projects--resources)
- [License](#license)
- [Authors](#authors)
@@ -37,7 +38,7 @@ It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
Running through the following commands will install the latest release of OctoDNS and set up a place for your config files to live. To determine if provider specific requirements are necessary see the [Supported providers table](#supported-providers) below.
-```
+```shell
$ mkdir dns
$ cd dns
$ virtualenv env
@@ -47,6 +48,14 @@ $ pip install octodns
$ mkdir config
```
+#### Installing a specific commit SHA
+
+If you'd like to install a version that has not yet been released in a repeatable/safe manner you can do the following. In general octoDNS is fairly stable in between releases thanks to the plan and apply process, but care should be taken regardless.
+
+```shell
+$ pip install -e git+https://git@github.com/octodns/octodns.git@#egg=octodns
+```
+
### Config
We start by creating a config file to tell OctoDNS about our providers and the zone(s) we want it to manage. Below we're setting up a `YamlProvider` to source records from our config files and both a `Route53Provider` and `DynProvider` to serve as the targets for those records. You can have any number of zones set up and any number of sources of data and targets for records for each. You can also have multiple config files, that make use of separate accounts and each manage a distinct set of zones. A good example of this this might be `./config/staging.yaml` & `./config/production.yaml`. We'll focus on a `config/production.yaml`.
@@ -79,6 +88,9 @@ zones:
targets:
- dyn
- route53
+
+ example.net.:
+ alias: example.com.
```
`class` is a special key that tells OctoDNS what python class should be loaded. Any other keys will be passed as configuration values to that provider. In general any sensitive or frequently rotated values should come from environmental variables. When OctoDNS sees a value that starts with `env/` it will look for that value in the process's environment and pass the result along.
@@ -87,7 +99,9 @@ Further information can be found in the `docstring` of each source and provider
The `max_workers` key in the `manager` section of the config enables threading to parallelize the planning portion of the sync.
-Now that we have something to tell OctoDNS about our providers & zones we need to tell it about or records. We'll keep it simple for now and just create a single `A` record at the top-level of the domain.
+In this example, `example.net` is an alias of zone `example.com`, which means they share the same sources and targets. They will therefore have identical records.
+
+Now that we have something to tell OctoDNS about our providers & zones we need to tell it about our records. We'll keep it simple for now and just create a single `A` record at the top-level of the domain.
`config/example.com.yaml`
@@ -97,8 +111,8 @@ Now that we have something to tell OctoDNS about our providers & zones we need t
ttl: 60
type: A
values:
- - 1.2.3.4
- - 1.2.3.5
+ - 1.2.3.4
+ - 1.2.3.5
```
Further information can be found in [Records Documentation](/docs/records.md).
@@ -107,7 +121,7 @@ Further information can be found in [Records Documentation](/docs/records.md).
We're ready to do a dry-run with our new setup to see what changes it would make. Since we're pretending here we'll act like there are no existing records for `example.com.` in our accounts on either provider.
-```
+```shell
$ octodns-sync --config-file=./config/production.yaml
...
********************************************************************************
@@ -131,7 +145,7 @@ There will be other logging information presented on the screen, but successful
Now it's time to tell OctoDNS to make things happen. We'll invoke it again with the same options and add a `--doit` on the end to tell it this time we actually want it to try and make the specified changes.
-```
+```shell
$ octodns-sync --config-file=./config/production.yaml --doit
...
```
@@ -144,17 +158,17 @@ In the above case we manually ran OctoDNS from the command line. That works and
The first step is to create a PR with your changes.
-
+
Assuming the code tests and config validation statuses are green the next step is to do a noop deploy and verify that the changes OctoDNS plans to make are the ones you expect.
-
+
After that comes a set of reviews. One from a teammate who should have full context on what you're trying to accomplish and visibility in to the changes you're making to do it. The other is from a member of the team here at GitHub that owns DNS, mostly as a sanity check and to make sure that best practices are being followed. As much of that as possible is baked into `octodns-validate`.
After the reviews it's time to branch deploy the change.
-
+
If that goes smoothly, you again see the expected changes, and verify them with `dig` and/or `octodns-report` you're good to hit the merge button. If there are problems you can quickly do a `.deploy dns/master` to go back to the previous state.
@@ -162,7 +176,7 @@ If that goes smoothly, you again see the expected changes, and verify them with
Very few situations will involve starting with a blank slate which is why there's tooling built in to pull existing data out of providers into a matching config file.
-```
+```shell
$ octodns-dump --config-file=config/production.yaml --output-dir=tmp/ example.com. route53
2017-03-15T13:33:34 INFO Manager __init__: config_file=tmp/production.yaml
2017-03-15T13:33:34 INFO Manager dump: zone=example.com., sources=('route53',)
@@ -178,9 +192,9 @@ The above command pulled the existing data out of Route53 and placed the results
| Provider | Requirements | Record Support | Dynamic | Notes |
|--|--|--|--|--|
-| [AzureProvider](/octodns/provider/azuredns.py) | azure-mgmt-dns | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | No | |
+| [AzureProvider](/octodns/provider/azuredns.py) | azure-identity, azure-mgmt-dns, azure-mgmt-trafficmanager | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | Alpha (A, AAAA, CNAME) | |
| [Akamai](/octodns/provider/edgedns.py) | edgegrid-python | A, AAAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
-| [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
+| [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [ConstellixProvider](/octodns/provider/constellix.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [DigitalOceanProvider](/octodns/provider/digitalocean.py) | | A, AAAA, CAA, CNAME, MX, NS, TXT, SRV | No | CAA tags restricted |
| [DnsMadeEasyProvider](/octodns/provider/dnsmadeeasy.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
@@ -189,18 +203,21 @@ The above command pulled the existing data out of Route53 and placed the results
| [EasyDNSProvider](/octodns/provider/easydns.py) | | A, AAAA, CAA, CNAME, MX, NAPTR, NS, SRV, TXT | No | |
| [EtcHostsProvider](/octodns/provider/etc_hosts.py) | | A, AAAA, ALIAS, CNAME | No | |
| [EnvVarSource](/octodns/source/envvar.py) | | TXT | No | read-only environment variable injection |
+| [GandiProvider](/octodns/provider/gandi.py) | | A, AAAA, ALIAS, CAA, CNAME, DNAME, MX, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
+| [GCoreProvider](/octodns/provider/gcore.py) | | A, AAAA, NS, MX, TXT, SRV, CNAME, PTR | Dynamic | |
| [GoogleCloudProvider](/octodns/provider/googlecloud.py) | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
+| [HetznerProvider](/octodns/provider/hetzner.py) | | A, AAAA, CAA, CNAME, MX, NS, SRV, TXT | No | |
| [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
-| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | Missing `NA` geo target |
+| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | |
| [OVH](/octodns/provider/ovh.py) | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | |
| [PowerDnsProvider](/octodns/provider/powerdns.py) | | All | No | |
| [Rackspace](/octodns/provider/rackspace.py) | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | |
| [Route53](/octodns/provider/route53.py) | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header |
| [Selectel](/octodns/provider/selectel.py) | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | |
-| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, SRV, SPF, TXT, SSHFP, CAA | No | |
+| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, NS, SRV, SPF, TXT, SSHFP, CAA | No | |
| [UltraDns](/octodns/provider/ultra.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | |
-| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
-| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
+| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
+| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [TinyDnsFileSource](/octodns/source/tinydns.py) | | A, CNAME, MX, NS, PTR | No | read-only |
| [YamlProvider](/octodns/provider/yaml.py) | | All | Yes | config |
@@ -211,6 +228,32 @@ The above command pulled the existing data out of Route53 and placed the results
* Dnsimple's uses the configured TTL when serving things through the ALIAS, there's also a secondary TXT record created alongside the ALIAS that octoDNS ignores
* octoDNS itself supports non-ASCII character sets, but in testing Cloudflare is the only provider where that is currently functional end-to-end. Others have failures either in the client libraries or API calls
+## Compatibilty & Compliance
+
+### `lenient`
+
+`lenient` mostly focuses on the details of `Record`s and standards compliance. When set to `true` octoDNS will allow non-compliant configurations & values where possible. For example CNAME values that don't end with a `.`, label length restrictions, and invalid geo codes on `dynamic` records. When in lenient mode octoDNS will log validation problems at `WARNING` and try and continue with the configuration or source data as it exists. See [Lenience](/docs/records.md#lenience) for more information on the concept and how it can be configured.
+
+### `strict_supports` (Work In Progress)
+
+`strict_supports` is a `Provider` level parameter that comes into play when a provider has been asked to create a record that it is unable to support. The simplest case of this would be record type, e.g. `SSHFP` not being supported by `AzureProvider`. If such a record is passed to an `AzureProvider` as a target the provider will take action based on the `strict_supports`. When `true` it will throw an exception saying that it's unable to create the record, when set to `false` it will log at `WARNING` with information about what it's unable to do and how it is attempting to work around it. Other examples of things that cannot be supported would be `dynamic` records on a provider that only supports simple or the lack of support for specific geos in a provider, e.g. Route53Provider does not support `NA-CA-*`.
+
+It is worth noting that these errors will happen during the plan phase of things so that problems will be visible without having to make changes.
+
+This concept is currently a work in progress and only partially implemented. While work is ongoing `strict_supports` will default to `false`. Once the work is considered complete & ready the default will change to `true` as it's a much safer and less surprising default as what you configure is what you'll get unless an error is thrown telling you why it cannot be done. You will then have the choice to explicitly request that things continue with work-arounds with `strict_supports` set to `false`. In the meantime it is encouraged that you manually configure the parameter to `true` in your provider configs.
+
+### Configuring `strict_supports`
+
+The `strict_supports` parameter is available on all providers and can be configured in YAML as follows:
+
+```yaml
+providers:
+ someprovider:
+ class: whatever.TheProvider
+ ...
+ strict_supports: true
+```
+
## Custom Sources and Providers
You can check out the [source](/octodns/source/) and [provider](/octodns/provider/) directory to see what's currently supported. Sources act as a source of record information. AxfrSource and TinyDnsFileSource are currently the only OSS sources, though we have several others internally that are specific to our environment. These include something to pull host data from [gPanel](https://githubengineering.com/githubs-metal-cloud/) and a similar provider that sources information about our network gear to create both `A` & `PTR` records for their interfaces. Things that might make good OSS sources might include an `ElbSource` that pulls information about [AWS Elastic Load Balancers](https://aws.amazon.com/elasticloadbalancing/) and dynamically creates `CNAME`s for them, or `Ec2Source` that pulls instance information so that records can be created for hosts similar to how our `GPanelProvider` works.
@@ -219,6 +262,8 @@ Most of the things included in OctoDNS are providers, the obvious difference bei
The `class` key in the providers config section can be used to point to arbitrary classes in the python path so internal or 3rd party providers can easily be included with no coordination beyond getting them into PYTHONPATH, most likely installed into the virtualenv with OctoDNS.
+For examples of building third-party sources and providers, see [Related Projects & Resources](#related-projects--resources).
+
## Other Uses
### Syncing between providers
@@ -278,13 +323,36 @@ Please see our [contributing document](/CONTRIBUTING.md) if you would like to pa
## Getting help
-If you have a problem or suggestion, please [open an issue](https://github.com/github/octodns/issues/new) in this repository, and we will do our best to help. Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md).
+If you have a problem or suggestion, please [open an issue](https://github.com/octodns/octodns/issues/new) in this repository, and we will do our best to help. Please note that this project adheres to the [Contributor Covenant Code of Conduct](/CODE_OF_CONDUCT.md).
+
+## Related Projects & Resources
+
+- **GitHub Action:** [OctoDNS-Sync](https://github.com/marketplace/actions/octodns-sync)
+- **Sample Implementations.** See how others are using it
+ - [`hackclub/dns`](https://github.com/hackclub/dns)
+ - [`kubernetes/k8s.io:/dns`](https://github.com/kubernetes/k8s.io/tree/main/dns)
+ - [`g0v-network/domains`](https://github.com/g0v-network/domains)
+ - [`jekyll/dns`](https://github.com/jekyll/dns)
+- **Custom Sources & Providers.**
+ - [`octodns/octodns-ddns`](https://github.com/octodns/octodns-ddns): A simple Dynamic DNS source.
+ - [`doddo/octodns-lexicon`](https://github.com/doddo/octodns-lexicon): Use [Lexicon](https://github.com/AnalogJ/lexicon) providers as octoDNS providers.
+ - [`asyncon/octoblox`](https://github.com/asyncon/octoblox): [Infoblox](https://www.infoblox.com/) provider.
+ - [`sukiyaki/octodns-netbox`](https://github.com/sukiyaki/octodns-netbox): [NetBox](https://github.com/netbox-community/netbox) source.
+ - [`kompetenzbolzen/octodns-custom-provider`](https://github.com/kompetenzbolzen/octodns-custom-provider): zonefile provider & phpIPAM source.
+- **Resources.**
+ - Article: [Visualising DNS records with Neo4j](https://medium.com/@costask/querying-and-visualising-octodns-records-with-neo4j-f4f72ab2d474) + code
+ - Video: [FOSDEM 2019 - DNS as code with octodns](https://archive.fosdem.org/2019/schedule/event/dns_octodns/)
+ - GitHub Blog: [Enabling DNS split authority with OctoDNS](https://github.blog/2017-04-27-enabling-split-authority-dns-with-octodns/)
+ - Tutorial: [How To Deploy and Manage Your DNS using OctoDNS on Ubuntu 18.04](https://www.digitalocean.com/community/tutorials/how-to-deploy-and-manage-your-dns-using-octodns-on-ubuntu-18-04)
+ - Cloudflare Blog: [Improving the Resiliency of Our Infrastructure DNS Zone](https://blog.cloudflare.com/improving-the-resiliency-of-our-infrastructure-dns-zone/)
+
+If you know of any other resources, please do let us know!
## License
OctoDNS is licensed under the [MIT license](LICENSE).
-The MIT license grant is not for GitHub's trademarks, which include the logo designs. GitHub reserves all trademark and copyright rights in and to all GitHub trademarks. GitHub's logos include, for instance, the stylized designs that include "logo" in the file title in the following folder: https://github.com/github/octodns/tree/master/docs/logos/
+The MIT license grant is not for GitHub's trademarks, which include the logo designs. GitHub reserves all trademark and copyright rights in and to all GitHub trademarks. GitHub's logos include, for instance, the stylized designs that include "logo" in the file title in the following folder: https://github.com/octodns/octodns/tree/master/docs/logos/
GitHub® and its stylized versions and the Invertocat mark are GitHub's Trademarks or registered Trademarks. When using GitHub's logos, be sure to follow the GitHub logo guidelines.
diff --git a/docs/geo_records.md b/docs/geo_records.md
index ba99260..3777564 100644
--- a/docs/geo_records.md
+++ b/docs/geo_records.md
@@ -1,6 +1,6 @@
## Geo Record Support
-Note: Geo DNS records are still supported for the time being, but it is still strongy encouraged that you look at [Dynamic Records](/docs/dynamic_records.md) instead as they are a superset of functionality.
+Note: Geo DNS records are still supported for the time being, but it is still strongly encouraged that you look at [Dynamic Records](/docs/dynamic_records.md) instead as they are a superset of functionality.
GeoDNS is currently supported for `A` and `AAAA` records on the Dyn (via Traffic Directors) and Route53 providers. Records with geo information pushed to providers without support for them will be managed as non-geo records using the base values.
diff --git a/docs/records.md b/docs/records.md
index 609383c..f210846 100644
--- a/docs/records.md
+++ b/docs/records.md
@@ -6,15 +6,20 @@ OctoDNS supports the following record types:
* `A`
* `AAAA`
+* `ALIAS`
+* `CAA`
* `CNAME`
+* `DNAME`
+* `LOC`
* `MX`
* `NAPTR`
* `NS`
* `PTR`
-* `SSHFP`
* `SPF`
* `SRV`
+* `SSHFP`
* `TXT`
+* `URLFWD`
Underlying provider support for each of these varies and some providers have extra requirements or limitations. In cases where a record type is not supported by a provider OctoDNS will ignore it there and continue to manage the record elsewhere. For example `SSHFP` is supported by Dyn, but not Route53. If your source data includes an SSHFP record OctoDNS will keep it in sync on Dyn, but not consider it when evaluating the state of Route53. The best way to find out what types are supported by a provider is to look for its `supports` method. If that method exists the logic will drive which records are supported and which are ignored. If the provider does not implement the method it will fall back to `BaseProvider.supports` which indicates full support.
@@ -81,3 +86,53 @@ In the above example each name had a single record, but there are cases where a
Each record type has a corresponding set of required data. The easiest way to determine what's required is probably to look at the record object in [`octodns/record/__init__.py`](/octodns/record/__init__.py). You may also utilize `octodns-validate` which will throw errors about what's missing when run.
`type` is required for all records. `ttl` is optional. When TTL is not specified the `YamlProvider`'s default will be used. In any situation where an array of `values` can be used you can opt to go with `value` as a single item if there's only one.
+
+### Lenience
+
+octoDNS is fairly strict in terms of standards compliance and is opinionated in terms of best practices. Examples of former include SRV record naming requirements and the latter that ALIAS records are constrained to the root of zones. The strictness and support of providers varies so you may encounter existing records that fail validation when you try to dump them or you may even have use cases for which you need to create or preserve records that don't validate. octoDNS's solution to this is the `lenient` flag.
+
+It's best to think of the `lenient` flag as "I know what I'm doing and accept any problems I run across." The main reason being is that some providers may allow the non-compliant setup and others may not. The behavior of the non-compliant records may even vary from one provider to another. Caveat emptor.
+
+#### octodns-dump
+
+If you're trying to import a zone into octoDNS config file using `octodns-dump` which fails due to validation errors you can supply the `--lenient` argument to tell octoDNS that you acknowledge that things aren't lining up with its expectations, but you'd like it to go ahead anyway. This will do its best to populate the zone and dump the results out into an octoDNS zone file and include the non-compliant bits. If you go to use that config file octoDNS will again complain about the validation problems. You can correct them in cases where that makes sense, but if you need to preserve the non-compliant records read on for options.
+
+#### Record level lenience
+
+When there are non-compliant records configured in Yaml you can add the following to tell octoDNS to do its best to proceed with them anyway. If you use `--lenient` above to dump a zone and you'd like to sync it as-is you can mark the problematic records this way.
+
+```yaml
+'not-root':
+ octodns:
+ lenient: true
+ type: ALIAS
+ values: something.else.com.
+```
+
+#### Zone level lenience
+
+If you'd like to enable lenience for a whole zone you can do so with the following, though it's strongly encouraged to mark things at record level when possible. The most common case where things may need to be done at the zone level is when using something other than `YamlProvider` as a source, e.g. syncing from `Route53Provider` to `Ns1Provider` when there are non-compliant records in the zone in Route53.
+
+```yaml
+ non-compliant-zone.com.:
+ lenient: true
+ sources:
+ - route53
+ targets:
+ - ns1
+```
+
+#### Restrict Record manipulations
+
+OctoDNS currently provides the ability to limit the number of updates/deletes on
+DNS records by configuring a percentage of allowed operations as a threshold.
+If left unconfigured, suitable defaults take over instead. In the below example,
+the Dyn provider is configured with limits of 40% on both update and
+delete operations over all the records present.
+
+````yaml
+dyn:
+ class: octodns.provider.dyn.DynProvider
+ update_pcent_threshold: 0.4
+ delete_pcent_threshold: 0.4
+````
diff --git a/octodns/__init__.py b/octodns/__init__.py
index 341f51e..16ec066 100644
--- a/octodns/__init__.py
+++ b/octodns/__init__.py
@@ -3,4 +3,4 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-__VERSION__ = '0.9.10'
+__VERSION__ = '0.9.13'
diff --git a/octodns/cmds/report.py b/octodns/cmds/report.py
index 3a26052..d0b82c0 100755
--- a/octodns/cmds/report.py
+++ b/octodns/cmds/report.py
@@ -17,7 +17,6 @@ from six import text_type
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
-from octodns.zone import Zone
class AsyncResolver(Resolver):
@@ -56,7 +55,7 @@ def main():
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
- zone = Zone(args.zone, manager.configured_sub_zones(args.zone))
+ zone = manager.get_zone(args.zone)
for source in sources:
source.populate(zone)
diff --git a/octodns/manager.py b/octodns/manager.py
index 288645f..104e445 100644
--- a/octodns/manager.py
+++ b/octodns/manager.py
@@ -9,6 +9,7 @@ from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from os import environ
from six import text_type
+from sys import stdout
import logging
from .provider.base import BaseProvider
@@ -121,6 +122,25 @@ class Manager(object):
raise ManagerException('Incorrect provider config for {}'
.format(provider_name))
+ self.processors = {}
+ for processor_name, processor_config in \
+ self.config.get('processors', {}).items():
+ try:
+ _class = processor_config.pop('class')
+ except KeyError:
+ self.log.exception('Invalid processor class')
+ raise ManagerException('Processor {} is missing class'
+ .format(processor_name))
+ _class = self._get_named_class('processor', _class)
+ kwargs = self._build_kwargs(processor_config)
+ try:
+ self.processors[processor_name] = _class(processor_name,
+ **kwargs)
+ except TypeError:
+ self.log.exception('Invalid processor config')
+ raise ManagerException('Incorrect processor config for {}'
+ .format(processor_name))
+
zone_tree = {}
# sort by reversed strings so that parent zones always come first
for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
@@ -222,21 +242,34 @@ class Manager(object):
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
- def _populate_and_plan(self, zone_name, sources, targets, lenient=False):
+ def _populate_and_plan(self, zone_name, processors, sources, targets,
+ desired=None, lenient=False):
self.log.debug('sync: populating, zone=%s, lenient=%s',
zone_name, lenient)
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
- for source in sources:
- try:
- source.populate(zone, lenient=lenient)
- except TypeError as e:
- if "keyword argument 'lenient'" not in text_type(e):
- raise
- self.log.warn(': provider %s does not accept lenient param',
- source.__class__.__name__)
- source.populate(zone)
+
+ if desired:
+ # This is an alias zone, rather than populate it we'll copy the
+ # records over from `desired`.
+ for _, records in desired._records.items():
+ for record in records:
+ zone.add_record(record.copy(zone=zone), lenient=lenient)
+ else:
+ for source in sources:
+ try:
+ source.populate(zone, lenient=lenient)
+ except TypeError as e:
+ if ("unexpected keyword argument 'lenient'"
+ not in text_type(e)):
+ raise
+ self.log.warn('provider %s does not accept lenient '
+ 'param', source.__class__.__name__)
+ source.populate(zone)
+
+ for processor in processors:
+ zone = processor.process_source_zone(zone, sources=sources)
self.log.debug('sync: planning, zone=%s', zone_name)
plans = []
@@ -249,25 +282,63 @@ class Manager(object):
'value': 'provider={}'.format(target.id)
})
zone.add_record(meta, replace=True)
- plan = target.plan(zone)
+ try:
+ plan = target.plan(zone, processors=processors)
+ except TypeError as e:
+ if "keyword argument 'processors'" not in text_type(e):
+ raise
+ self.log.warn('provider.plan %s does not accept processors '
+ 'param', target.__class__.__name__)
+ plan = target.plan(zone)
+
+ for processor in processors:
+ plan = processor.process_plan(plan, sources=sources,
+ target=target)
if plan:
plans.append((target, plan))
- return plans
+ # Return the zone as it's the desired state
+ return plans, zone
def sync(self, eligible_zones=[], eligible_sources=[], eligible_targets=[],
- dry_run=True, force=False):
- self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
- 'dry_run=%s, force=%s', eligible_zones, eligible_targets,
- dry_run, force)
+ dry_run=True, force=False, plan_output_fh=stdout):
+
+ self.log.info(
+ 'sync: eligible_zones=%s, eligible_targets=%s, dry_run=%s, '
+ 'force=%s, plan_output_fh=%s',
+ eligible_zones, eligible_targets, dry_run, force,
+ getattr(plan_output_fh, 'name', plan_output_fh.__class__.__name__))
zones = self.config['zones'].items()
if eligible_zones:
zones = [z for z in zones if z[0] in eligible_zones]
+ aliased_zones = {}
futures = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
+ if 'alias' in config:
+ source_zone = config['alias']
+
+ # Check that the source zone is defined.
+ if source_zone not in self.config['zones']:
+ self.log.error('Invalid alias zone {}, target {} does '
+ 'not exist'.format(zone_name, source_zone))
+ raise ManagerException('Invalid alias zone {}: '
+ 'source zone {} does not exist'
+ .format(zone_name, source_zone))
+
+ # Check that the source zone is not an alias zone itself.
+ if 'alias' in self.config['zones'][source_zone]:
+ self.log.error('Invalid alias zone {}, target {} is an '
+ 'alias zone'.format(zone_name, source_zone))
+ raise ManagerException('Invalid alias zone {}: source '
+ 'zone {} is an alias zone'
+ .format(zone_name, source_zone))
+
+ aliased_zones[zone_name] = source_zone
+ continue
+
lenient = config.get('lenient', False)
try:
sources = config['sources']
@@ -281,6 +352,8 @@ class Manager(object):
raise ManagerException('Zone {} is missing targets'
.format(zone_name))
+ processors = config.get('processors', [])
+
if (eligible_sources and not
[s for s in sources if s in eligible_sources]):
self.log.info('sync: no eligible sources, skipping')
@@ -298,6 +371,15 @@ class Manager(object):
self.log.info('sync: sources=%s -> targets=%s', sources, targets)
+ try:
+ collected = []
+ for processor in processors:
+ collected.append(self.processors[processor])
+ processors = collected
+ except KeyError:
+ raise ManagerException('Zone {}, unknown processor: {}'
+ .format(zone_name, processor))
+
try:
# rather than using a list comprehension, we break this loop
# out so that the `except` block below can reference the
@@ -324,12 +406,43 @@ class Manager(object):
.format(zone_name, target))
futures.append(self._executor.submit(self._populate_and_plan,
- zone_name, sources,
- targets, lenient=lenient))
+ zone_name, processors,
+ sources, targets,
+ lenient=lenient))
- # Wait on all results and unpack/flatten them in to a list of target &
- # plan pairs.
- plans = [p for f in futures for p in f.result()]
+ # Wait on all results and unpack/flatten the plans and store the
+ # desired states in case we need them below
+ plans = []
+ desired = {}
+ for future in futures:
+ ps, d = future.result()
+ desired[d.name] = d
+ for plan in ps:
+ plans.append(plan)
+
+ # Populate aliases zones.
+ futures = []
+ for zone_name, zone_source in aliased_zones.items():
+ source_config = self.config['zones'][zone_source]
+ try:
+ desired_config = desired[zone_source]
+ except KeyError:
+ raise ManagerException('Zone {} cannot be sync without zone '
+ '{} sinced it is aliased'
+ .format(zone_name, zone_source))
+ futures.append(self._executor.submit(
+ self._populate_and_plan,
+ zone_name,
+ processors,
+ [],
+ [self.providers[t] for t in source_config['targets']],
+ desired=desired_config,
+ lenient=lenient
+ ))
+
+ # Wait on results and unpack/flatten the plans, ignore the desired here
+ # as these are aliased zones
+ plans += [p for f in futures for p in f.result()[0]]
# Best effort sort plans children first so that we create/update
# children zones before parents which should allow us to more safely
@@ -339,7 +452,7 @@ class Manager(object):
plans.sort(key=self._plan_keyer, reverse=True)
for output in self.plan_outputs.values():
- output.run(plans=plans, log=self.log)
+ output.run(plans=plans, log=self.log, fh=plan_output_fh)
if not force:
self.log.debug('sync: checking safety')
@@ -377,12 +490,11 @@ class Manager(object):
except KeyError as e:
raise ManagerException('Unknown source: {}'.format(e.args[0]))
- sub_zones = self.configured_sub_zones(zone)
- za = Zone(zone, sub_zones)
+ za = self.get_zone(zone)
for source in a:
source.populate(za)
- zb = Zone(zone, sub_zones)
+ zb = self.get_zone(zone)
for source in b:
source.populate(zb)
@@ -421,6 +533,25 @@ class Manager(object):
for zone_name, config in self.config['zones'].items():
zone = Zone(zone_name, self.configured_sub_zones(zone_name))
+ source_zone = config.get('alias')
+ if source_zone:
+ if source_zone not in self.config['zones']:
+ self.log.exception('Invalid alias zone')
+ raise ManagerException('Invalid alias zone {}: '
+ 'source zone {} does not exist'
+ .format(zone_name, source_zone))
+
+ if 'alias' in self.config['zones'][source_zone]:
+ self.log.exception('Invalid alias zone')
+ raise ManagerException('Invalid alias zone {}: '
+ 'source zone {} is an alias zone'
+ .format(zone_name, source_zone))
+
+ # this is just here to satisfy coverage, see
+ # https://github.com/nedbat/coveragepy/issues/198
+ source_zone = source_zone
+ continue
+
try:
sources = config['sources']
except KeyError:
@@ -428,9 +559,9 @@ class Manager(object):
.format(zone_name))
try:
- # rather than using a list comprehension, we break this loop
- # out so that the `except` block below can reference the
- # `source`
+ # rather than using a list comprehension, we break this
+ # loop out so that the `except` block below can reference
+ # the `source`
collected = []
for source in sources:
collected.append(self.providers[source])
@@ -442,3 +573,24 @@ class Manager(object):
for source in sources:
if isinstance(source, YamlProvider):
source.populate(zone)
+
+ # check that processors are in order if any are specified
+ processors = config.get('processors', [])
+ try:
+ # same as above, but for processors this time
+ for processor in processors:
+ collected.append(self.processors[processor])
+ except KeyError:
+ raise ManagerException('Zone {}, unknown processor: {}'
+ .format(zone_name, processor))
+
+ def get_zone(self, zone_name):
+ if not zone_name[-1] == '.':
+ raise ManagerException('Invalid zone name {}, missing ending dot'
+ .format(zone_name))
+
+ for name, config in self.config['zones'].items():
+ if name == zone_name:
+ return Zone(name, self.configured_sub_zones(name))
+
+ raise ManagerException('Unknown zone name {}'.format(zone_name))
diff --git a/octodns/processor/__init__.py b/octodns/processor/__init__.py
new file mode 100644
index 0000000..14ccf18
--- /dev/null
+++ b/octodns/processor/__init__.py
@@ -0,0 +1,6 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
diff --git a/octodns/processor/acme.py b/octodns/processor/acme.py
new file mode 100644
index 0000000..2d7c101
--- /dev/null
+++ b/octodns/processor/acme.py
@@ -0,0 +1,61 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from logging import getLogger
+
+from .base import BaseProcessor
+
+
+class AcmeMangingProcessor(BaseProcessor):
+ log = getLogger('AcmeMangingProcessor')
+
+ def __init__(self, name):
+ '''
+ processors:
+ acme:
+ class: octodns.processor.acme.AcmeMangingProcessor
+
+ ...
+
+ zones:
+ something.com.:
+ ...
+ processors:
+ - acme
+ ...
+ '''
+ super(AcmeMangingProcessor, self).__init__(name)
+
+ self._owned = set()
+
+ def process_source_zone(self, desired, *args, **kwargs):
+ for record in desired.records:
+ if record._type == 'TXT' and \
+ record.name.startswith('_acme-challenge'):
+ # We have a managed acme challenge record (owned by octoDNS) so
+ # we should mark it as such
+ record = record.copy()
+ record.values.append('*octoDNS*')
+ record.values.sort()
+ # This assumes we'll see things as sources before targets,
+ # which is the case...
+ self._owned.add(record)
+ desired.add_record(record, replace=True)
+ return desired
+
+ def process_target_zone(self, existing, *args, **kwargs):
+ for record in existing.records:
+ # Uses a startswith rather than == to ignore subdomain challenges,
+ # e.g. _acme-challenge.foo.domain.com when managing domain.com
+ if record._type == 'TXT' and \
+ record.name.startswith('_acme-challenge') and \
+ '*octoDNS*' not in record.values and \
+ record not in self._owned:
+ self.log.info('_process: ignoring %s', record.fqdn)
+ existing.remove_record(record)
+
+ return existing
diff --git a/octodns/processor/base.py b/octodns/processor/base.py
new file mode 100644
index 0000000..98f2baa
--- /dev/null
+++ b/octodns/processor/base.py
@@ -0,0 +1,69 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+
+class BaseProcessor(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def process_source_zone(self, desired, sources):
+ '''
+ Called after all sources have completed populate. Provides an
+ opportunity for the processor to modify the desired `Zone` that targets
+ will receive.
+
+ - Will see `desired` after any modifications done by
+ `Provider._process_desired_zone` and processors configured to run
+ before this one.
+ - May modify `desired` directly.
+ - Must return `desired` which will normally be the `desired` param.
+ - Must not modify records directly, `record.copy` should be called,
+ the results of which can be modified, and then `Zone.add_record` may
+ be used with `replace=True`.
+ - May call `Zone.remove_record` to remove records from `desired`.
+ - Sources may be empty, as will be the case for aliased zones.
+ '''
+ return desired
+
+ def process_target_zone(self, existing, target):
+ '''
+ Called after a target has completed `populate`, before changes are
+ computed between `existing` and `desired`. This provides an opportunity
+ to modify the `existing` `Zone`.
+
+ - Will see `existing` after any modifications done by processors
+ configured to run before this one.
+ - May modify `existing` directly.
+ - Must return `existing` which will normally be the `existing` param.
+ - Must not modify records directly, `record.copy` should be called,
+ the results of which can be modified, and then `Zone.add_record` may
+ be used with `replace=True`.
+ - May call `Zone.remove_record` to remove records from `existing`.
+ '''
+ return existing
+
+ def process_plan(self, plan, sources, target):
+ '''
+ Called after the planning phase has completed. Provides an opportunity
+ for the processors to modify the plan thus changing the actions that
+ will be displayed and potentially applied.
+
+ - `plan` may be None if no changes were detected, if so a `Plan` may
+ still be created and returned.
+ - May modify `plan.changes` directly or create a new `Plan`.
+ - Does not have to modify `plan.desired` and/or `plan.existing` to line
+ up with any modifications made to `plan.changes`.
+ - Should copy over `plan.exists`, `plan.update_pcent_threshold`, and
+ `plan.delete_pcent_threshold` when creating a new `Plan`.
+ - Must return a `Plan` which may be `plan` or can be a newly created
+ one `plan.desired` and `plan.existing` copied over as-is or modified.
+ '''
+ # plan may be None if no changes were detected up until now, the
+ # process may still create a plan.
+ # sources may be empty, as will be the case for aliased zones
+ return plan
diff --git a/octodns/processor/filter.py b/octodns/processor/filter.py
new file mode 100644
index 0000000..d9b8ee3
--- /dev/null
+++ b/octodns/processor/filter.py
@@ -0,0 +1,42 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from .base import BaseProcessor
+
+
+class TypeAllowlistFilter(BaseProcessor):
+
+ def __init__(self, name, allowlist):
+ super(TypeAllowlistFilter, self).__init__(name)
+ self.allowlist = set(allowlist)
+
+ def _process(self, zone, *args, **kwargs):
+ for record in zone.records:
+ if record._type not in self.allowlist:
+ zone.remove_record(record)
+
+ return zone
+
+ process_source_zone = _process
+ process_target_zone = _process
+
+
+class TypeRejectlistFilter(BaseProcessor):
+
+ def __init__(self, name, rejectlist):
+ super(TypeRejectlistFilter, self).__init__(name)
+ self.rejectlist = set(rejectlist)
+
+ def _process(self, zone, *args, **kwargs):
+ for record in zone.records:
+ if record._type in self.rejectlist:
+ zone.remove_record(record)
+
+ return zone
+
+ process_source_zone = _process
+ process_target_zone = _process
diff --git a/octodns/processor/ownership.py b/octodns/processor/ownership.py
new file mode 100644
index 0000000..42f041d
--- /dev/null
+++ b/octodns/processor/ownership.py
@@ -0,0 +1,100 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from collections import defaultdict
+
+from ..provider.plan import Plan
+from ..record import Record
+
+from .base import BaseProcessor
+
+
+# Mark anything octoDNS is managing that way it can know it's safe to modify or
+# delete. We'll take ownership of existing records that we're told to manage
+# and thus "own" them going forward.
+class OwnershipProcessor(BaseProcessor):
+
+ def __init__(self, name, txt_name='_owner', txt_value='*octodns*'):
+ super(OwnershipProcessor, self).__init__(name)
+ self.txt_name = txt_name
+ self.txt_value = txt_value
+ self._txt_values = [txt_value]
+
+ def process_source_zone(self, desired, *args, **kwargs):
+ for record in desired.records:
+ # Then create and add an ownership TXT for each of them
+ record_name = record.name.replace('*', '_wildcard')
+ if record.name:
+ name = '{}.{}.{}'.format(self.txt_name, record._type,
+ record_name)
+ else:
+ name = '{}.{}'.format(self.txt_name, record._type)
+ txt = Record.new(desired, name, {
+ 'type': 'TXT',
+ 'ttl': 60,
+ 'value': self.txt_value,
+ })
+ desired.add_record(txt)
+
+ return desired
+
+ def _is_ownership(self, record):
+ return record._type == 'TXT' and \
+ record.name.startswith(self.txt_name) \
+ and record.values == self._txt_values
+
+ def process_plan(self, plan, *args, **kwargs):
+ if not plan:
+ # If we don't have any change there's nothing to do
+ return plan
+
+ # First find all the ownership info
+ owned = defaultdict(dict)
+ # We need to look for ownership in both the desired and existing
+ # states, many things will show up in both, but that's fine.
+ for record in list(plan.existing.records) + list(plan.desired.records):
+ if self._is_ownership(record):
+ pieces = record.name.split('.', 2)
+ if len(pieces) > 2:
+ _, _type, name = pieces
+ name = name.replace('_wildcard', '*')
+ else:
+ _type = pieces[1]
+ name = ''
+ owned[name][_type.upper()] = True
+
+ # Cases:
+ # - Configured in source
+ # - We'll fully CRU/manage it adding ownership TXT,
+ # thanks to process_source_zone, if needed
+ # - Not in source
+ # - Has an ownership TXT - delete it & the ownership TXT
+ # - Does not have an ownership TXT - don't delete it
+ # - Special records like octodns-meta
+ # - Should be left alone and should not have ownership TXTs
+
+ filtered_changes = []
+ for change in plan.changes:
+ record = change.record
+
+ if not self._is_ownership(record) and \
+ record._type not in owned[record.name] and \
+ record.name != 'octodns-meta':
+ # It's not an ownership TXT, it's not owned, and it's not
+ # special we're going to ignore it
+ continue
+
+ # We own this record or owned it up until now so whatever the
+ # change is we should do
+ filtered_changes.append(change)
+
+ if plan.changes != filtered_changes:
+ return Plan(plan.existing, plan.desired, filtered_changes,
+ plan.exists, plan.update_pcent_threshold,
+ plan.delete_pcent_threshold)
+
+ return plan
diff --git a/octodns/provider/__init__.py b/octodns/provider/__init__.py
index 14ccf18..7e18783 100644
--- a/octodns/provider/__init__.py
+++ b/octodns/provider/__init__.py
@@ -4,3 +4,11 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
+
+
+class ProviderException(Exception):
+ pass
+
+
+class SupportsException(ProviderException):
+ pass
diff --git a/octodns/provider/azuredns.py b/octodns/provider/azuredns.py
index 3d8122a..0b4e1c9 100644
--- a/octodns/provider/azuredns.py
+++ b/octodns/provider/azuredns.py
@@ -5,19 +5,29 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
+from collections import defaultdict
+
+from azure.identity import ClientSecretCredential
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.dns import DnsManagementClient
-from msrestazure.azure_exceptions import CloudError
+from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.dns.models import ARecord, AaaaRecord, CaaRecord, \
CnameRecord, MxRecord, SrvRecord, NsRecord, PtrRecord, TxtRecord, Zone
+from azure.mgmt.trafficmanager.models import Profile, DnsConfig, \
+ MonitorConfig, Endpoint, MonitorConfigCustomHeadersItem
import logging
from functools import reduce
-from ..record import Record
+from ..record import Record, Update, GeoCodes
+from . import ProviderException
from .base import BaseProvider
+class AzureException(ProviderException):
+ pass
+
+
def escape_semicolon(s):
assert s
return s.replace(';', '\\;')
@@ -28,6 +38,25 @@ def unescape_semicolon(s):
return s.replace('\\;', ';')
+def azure_chunked_value(val):
+ CHUNK_SIZE = 255
+ val_replace = val.replace('"', '\\"')
+ value = unescape_semicolon(val_replace)
+ if len(val) > CHUNK_SIZE:
+ vs = [value[i:i + CHUNK_SIZE]
+ for i in range(0, len(value), CHUNK_SIZE)]
+ else:
+ vs = value
+ return vs
+
+
+def azure_chunked_values(s):
+ values = []
+ for v in s:
+ values.append(azure_chunked_value(v))
+ return values
+
+
class _AzureRecord(object):
'''Wrapper for OctoDNS record for AzureProvider to make dns_client calls.
@@ -49,14 +78,16 @@ class _AzureRecord(object):
'TXT': TxtRecord
}
- def __init__(self, resource_group, record, delete=False):
+ def __init__(self, resource_group, record, delete=False,
+ traffic_manager=None):
'''Constructor for _AzureRecord.
Notes on Azure records: An Azure record set has the form
- RecordSet(name=<...>, type=<...>, arecords=[...], aaaa_records, ..)
+ RecordSet(name=<...>, type=<...>, a_records=[...],
+ aaaa_records=[...], ...)
When constructing an azure record as done in self._apply_Create,
the argument parameters for an A record would be
- parameters={'ttl': , 'arecords': [ARecord(),]}.
+ parameters={'ttl': , 'a_records': [ARecord(),]}.
As another example for CNAME record:
parameters={'ttl': , 'cname_record': CnameRecord()}.
@@ -72,26 +103,32 @@ class _AzureRecord(object):
:type return: _AzureRecord
'''
+ self.log = logging.getLogger('AzureRecord')
+
self.resource_group = resource_group
- self.zone_name = record.zone.name[:len(record.zone.name) - 1]
+ self.zone_name = record.zone.name[:-1]
self.relative_record_set_name = record.name or '@'
self.record_type = record._type
+ self._record = record
+ self.traffic_manager = traffic_manager
if delete:
return
# Refer to function docstring for key_name and class_name.
- format_u_s = '' if record._type == 'A' else '_'
- key_name = '{}{}records'.format(self.record_type, format_u_s).lower()
+ key_name = '{}_records'.format(self.record_type).lower()
if record._type == 'CNAME':
- key_name = key_name[:len(key_name) - 1]
+ key_name = key_name[:-1]
azure_class = self.TYPE_MAP[self.record_type]
- self.params = getattr(self, '_params_for_{}'.format(record._type))
- self.params = self.params(record.data, key_name, azure_class)
+ params_for = getattr(self, '_params_for_{}'.format(record._type))
+ self.params = params_for(record.data, key_name, azure_class)
self.params['ttl'] = record.ttl
def _params_for_A(self, data, key_name, azure_class):
+ if self._record.dynamic and self.traffic_manager:
+ return {'target_resource': self.traffic_manager}
+
try:
values = data['values']
except KeyError:
@@ -99,6 +136,9 @@ class _AzureRecord(object):
return {key_name: [azure_class(ipv4_address=v) for v in values]}
def _params_for_AAAA(self, data, key_name, azure_class):
+ if self._record.dynamic and self.traffic_manager:
+ return {'target_resource': self.traffic_manager}
+
try:
values = data['values']
except KeyError:
@@ -119,6 +159,9 @@ class _AzureRecord(object):
return {key_name: params}
def _params_for_CNAME(self, data, key_name, azure_class):
+ if self._record.dynamic and self.traffic_manager:
+ return {'target_resource': self.traffic_manager}
+
return {key_name: azure_class(cname=data['value'])}
def _params_for_MX(self, data, key_name, azure_class):
@@ -162,11 +205,19 @@ class _AzureRecord(object):
return {key_name: [azure_class(ptrdname=v) for v in values]}
def _params_for_TXT(self, data, key_name, azure_class):
+
+ params = []
try: # API for TxtRecord has list of str, even for singleton
- values = [unescape_semicolon(v) for v in data['values']]
+ values = [v for v in azure_chunked_values(data['values'])]
except KeyError:
- values = [unescape_semicolon(data['value'])]
- return {key_name: [azure_class(value=[v]) for v in values]}
+ values = [azure_chunked_value(data['value'])]
+
+ for v in values:
+ if isinstance(v, list):
+ params.append(azure_class(value=v))
+ else:
+ params.append(azure_class(value=[v]))
+ return {key_name: params}
def _equals(self, b):
'''Checks whether two records are equal by comparing all fields.
@@ -199,25 +250,6 @@ class _AzureRecord(object):
(parse_dict(self.params) == parse_dict(b.params)) & \
(self.relative_record_set_name == b.relative_record_set_name)
- def __str__(self):
- '''String representation of an _AzureRecord.
- :type return: str
- '''
- string = 'Zone: {}; '.format(self.zone_name)
- string += 'Name: {}; '.format(self.relative_record_set_name)
- string += 'Type: {}; '.format(self.record_type)
- if not hasattr(self, 'params'):
- return string
- string += 'Ttl: {}; '.format(self.params['ttl'])
- for char in self.params:
- if char != 'ttl':
- try:
- for rec in self.params[char]:
- string += 'Record: {}; '.format(rec.__dict__)
- except:
- string += 'Record: {}; '.format(self.params[char].__dict__)
- return string
-
def _check_endswith_dot(string):
return string if string.endswith('.') else string + '.'
@@ -231,7 +263,145 @@ def _parse_azure_type(string):
:type return: str
'''
- return string.split('/')[len(string.split('/')) - 1]
+ return string.split('/')[-1]
+
+
+def _root_traffic_manager_name(record):
+ # ATM names can only have letters, numbers and hyphens
+ # replace dots with double hyphens to ensure unique mapping,
+ # hoping that real life FQDNs won't have double hyphens
+ name = record.fqdn[:-1].replace('.', '--')
+ if record._type != 'CNAME':
+ name += '-{}'.format(record._type)
+ return name
+
+
+def _rule_traffic_manager_name(pool, record):
+ prefix = _root_traffic_manager_name(record)
+ return '{}-rule-{}'.format(prefix, pool)
+
+
+def _pool_traffic_manager_name(pool, record):
+ prefix = _root_traffic_manager_name(record)
+ return '{}-pool-{}'.format(prefix, pool)
+
+
+def _get_monitor(record):
+ monitor = MonitorConfig(
+ protocol=record.healthcheck_protocol,
+ port=record.healthcheck_port,
+ path=record.healthcheck_path,
+ )
+ host = record.healthcheck_host()
+ if host:
+ monitor.custom_headers = [MonitorConfigCustomHeadersItem(
+ name='Host', value=host
+ )]
+ return monitor
+
+
+def _check_valid_dynamic(record):
+ typ = record._type
+ if typ in ['A', 'AAAA']:
+ defaults = set(record.values)
+ if len(defaults) > 1:
+ pools = record.dynamic.pools
+ vals = set(
+ v['value']
+ for _, pool in pools.items()
+ for v in pool._data()['values']
+ )
+ if defaults != vals:
+ # we don't yet support multi-value defaults, specifying all
+ # pool values allows for Traffic Manager profile optimization
+ msg = ('{} {}: Values of A/AAAA dynamic records must either '
+ 'have a single value or contain all values from all '
+ 'pools')
+ raise AzureException(msg.format(record.fqdn, record._type))
+ elif typ != 'CNAME':
+ # dynamic records of unsupported type
+ msg = '{}: Dynamic records in Azure must be of type A/AAAA/CNAME'
+ raise AzureException(msg.format(record.fqdn))
+
+
+def _profile_is_match(have, desired):
+ if have is None or desired is None:
+ return False
+
+ log = logging.getLogger('azuredns._profile_is_match').debug
+
+ def false(have, desired, name=None):
+ prefix = 'profile={}'.format(name) if name else ''
+ attr = have.__class__.__name__
+ log('%s have.%s = %s', prefix, attr, have)
+ log('%s desired.%s = %s', prefix, attr, desired)
+ return False
+
+ # compare basic attributes
+ if have.name != desired.name or \
+ have.traffic_routing_method != desired.traffic_routing_method or \
+ len(have.endpoints) != len(desired.endpoints):
+ return false(have, desired)
+
+ # compare dns config
+ dns_have = have.dns_config
+ dns_desired = desired.dns_config
+ if dns_have.ttl != dns_desired.ttl or \
+ dns_have.relative_name is None or \
+ dns_desired.relative_name is None or \
+ dns_have.relative_name != dns_desired.relative_name:
+ return false(dns_have, dns_desired, have.name)
+
+ # compare monitoring configuration
+ monitor_have = have.monitor_config
+ monitor_desired = desired.monitor_config
+ if monitor_have.protocol != monitor_desired.protocol or \
+ monitor_have.port != monitor_desired.port or \
+ monitor_have.path != monitor_desired.path or \
+ monitor_have.custom_headers != monitor_desired.custom_headers:
+ return false(monitor_have, monitor_desired, have.name)
+
+ # compare endpoints
+ method = have.traffic_routing_method
+ if method == 'Priority':
+ have_endpoints = sorted(have.endpoints, key=lambda e: e.priority)
+ desired_endpoints = sorted(desired.endpoints,
+ key=lambda e: e.priority)
+ elif method == 'Weighted':
+ have_endpoints = sorted(have.endpoints, key=lambda e: e.target)
+ desired_endpoints = sorted(desired.endpoints, key=lambda e: e.target)
+ else:
+ have_endpoints = have.endpoints
+ desired_endpoints = desired.endpoints
+ endpoints = zip(have_endpoints, desired_endpoints)
+ for have_endpoint, desired_endpoint in endpoints:
+ if have_endpoint.name != desired_endpoint.name or \
+ have_endpoint.type != desired_endpoint.type:
+ return false(have_endpoint, desired_endpoint, have.name)
+ target_type = have_endpoint.type.split('/')[-1]
+ if target_type == 'externalEndpoints':
+ # compare value, weight, priority
+ if have_endpoint.target != desired_endpoint.target:
+ return false(have_endpoint, desired_endpoint, have.name)
+ if method == 'Weighted' and \
+ have_endpoint.weight != desired_endpoint.weight:
+ return false(have_endpoint, desired_endpoint, have.name)
+ elif target_type == 'nestedEndpoints':
+ # compare targets
+ if have_endpoint.target_resource_id != \
+ desired_endpoint.target_resource_id:
+ return false(have_endpoint, desired_endpoint, have.name)
+ # compare geos
+ if method == 'Geographic':
+ have_geos = sorted(have_endpoint.geo_mapping)
+ desired_geos = sorted(desired_endpoint.geo_mapping)
+ if have_geos != desired_geos:
+ return false(have_endpoint, desired_endpoint, have.name)
+ else:
+ # unexpected, give up
+ return False
+
+ return True
class AzureProvider(BaseProvider):
@@ -281,9 +451,13 @@ class AzureProvider(BaseProvider):
The first four variables above can be hidden in environment variables
and octoDNS will automatically search for them in the shell. It is
possible to also hard-code into the config file: eg, resource_group.
+
+ Please read https://github.com/octodns/octodns/pull/706 for an overview
+ of how dynamic records are designed and caveats of using them.
'''
SUPPORTS_GEO = False
- SUPPORTS_DYNAMIC = False
+ SUPPORTS_DYNAMIC = True
+ SUPPORTS_MUTLIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
@@ -294,18 +468,57 @@ class AzureProvider(BaseProvider):
'key=***, directory_id:%s', id, client_id, directory_id)
super(AzureProvider, self).__init__(id, *args, **kwargs)
- credentials = ServicePrincipalCredentials(
- client_id, secret=key, tenant=directory_id
- )
- self._dns_client = DnsManagementClient(credentials, sub_id)
+ # Store necessary initialization params
+ self._dns_client_handle = None
+ self._dns_client_client_id = client_id
+ self._dns_client_key = key
+ self._dns_client_directory_id = directory_id
+ self._dns_client_subscription_id = sub_id
+ self.__dns_client = None
+ self.__tm_client = None
+
self._resource_group = resource_group
self._azure_zones = set()
+ self._traffic_managers = dict()
+
+ @property
+ def _dns_client(self):
+ if self.__dns_client is None:
+ # Azure's logger spits out a lot of debug messages at 'INFO'
+ # level, override it by re-assigning `info` method to `debug`
+ # (ugly hack until I find a better way)
+ logger_name = 'azure.core.pipeline.policies.http_logging_policy'
+ logger = logging.getLogger(logger_name)
+ logger.info = logger.debug
+ self.__dns_client = DnsManagementClient(
+ credential=ClientSecretCredential(
+ client_id=self._dns_client_client_id,
+ client_secret=self._dns_client_key,
+ tenant_id=self._dns_client_directory_id,
+ logger=logger,
+ ),
+ subscription_id=self._dns_client_subscription_id,
+ )
+ return self.__dns_client
+
+ @property
+ def _tm_client(self):
+ if self.__tm_client is None:
+ self.__tm_client = TrafficManagerManagementClient(
+ ServicePrincipalCredentials(
+ self._dns_client_client_id,
+ secret=self._dns_client_key,
+ tenant=self._dns_client_directory_id,
+ ),
+ self._dns_client_subscription_id,
+ )
+ return self.__tm_client
def _populate_zones(self):
self.log.debug('azure_zones: loading')
list_zones = self._dns_client.zones.list_by_resource_group
for zone in list_zones(self._resource_group):
- self._azure_zones.add(zone.name)
+ self._azure_zones.add(zone.name.rstrip('.'))
def _check_zone(self, name, create=False):
'''Checks whether a zone specified in a source exist in Azure server.
@@ -320,29 +533,56 @@ class AzureProvider(BaseProvider):
:type return: str or None
'''
- self.log.debug('_check_zone: name=%s', name)
- try:
- if name in self._azure_zones:
- return name
- self._dns_client.zones.get(self._resource_group, name)
+ self.log.debug('_check_zone: name=%s create=%s', name, create)
+ # Check if the zone already exists in our set
+ if name in self._azure_zones:
+ return name
+ # If not, and it's time to create, let's do it.
+ if create:
+ self.log.debug('_check_zone:no matching zone; creating %s', name)
+ create_zone = self._dns_client.zones.create_or_update
+ create_zone(self._resource_group, name, Zone(location='global'))
self._azure_zones.add(name)
return name
- except CloudError as err:
- msg = 'The Resource \'Microsoft.Network/dnszones/{}\''.format(name)
- msg += ' under resource group \'{}\''.format(self._resource_group)
- msg += ' was not found.'
- if msg == err.message:
- # Then the only error is that the zone doesn't currently exist
- if create:
- self.log.debug('_check_zone:no matching zone; creating %s',
- name)
- create_zone = self._dns_client.zones.create_or_update
- create_zone(self._resource_group, name,
- Zone(location='global'))
- return name
- else:
- return
- raise
+ else:
+ # Else return nothing (aka false)
+ return
+
+ def _populate_traffic_managers(self):
+ self.log.debug('traffic managers: loading')
+ list_profiles = self._tm_client.profiles.list_by_resource_group
+ for profile in list_profiles(self._resource_group):
+ self._traffic_managers[profile.id] = profile
+ # link nested profiles in advance for convenience
+ for _, profile in self._traffic_managers.items():
+ self._populate_nested_profiles(profile)
+
+ def _populate_nested_profiles(self, profile):
+ for ep in profile.endpoints:
+ target_id = ep.target_resource_id
+ if target_id and target_id in self._traffic_managers:
+ target = self._traffic_managers[target_id]
+ ep.target_resource = self._populate_nested_profiles(target)
+ return profile
+
+ def _get_tm_profile_by_id(self, resource_id):
+ if not self._traffic_managers:
+ self._populate_traffic_managers()
+ return self._traffic_managers.get(resource_id)
+
+ def _profile_name_to_id(self, name):
+ return '/subscriptions/' + self._dns_client_subscription_id + \
+ '/resourceGroups/' + self._resource_group + \
+ '/providers/Microsoft.Network/trafficManagerProfiles/' + \
+ name
+
+ def _get_tm_profile_by_name(self, name):
+ profile_id = self._profile_name_to_id(name)
+ return self._get_tm_profile_by_id(profile_id)
+
+ def _get_tm_for_dynamic_record(self, record):
+ name = _root_traffic_manager_name(record)
+ return self._get_tm_profile_by_name(name)
def populate(self, zone, target=False, lenient=False):
'''Required function of manager.py to collect records from zone.
@@ -373,35 +613,63 @@ class AzureProvider(BaseProvider):
exists = False
before = len(zone.records)
- zone_name = zone.name[:len(zone.name) - 1]
+ zone_name = zone.name[:-1]
self._populate_zones()
- self._check_zone(zone_name)
- _records = []
records = self._dns_client.record_sets.list_by_dns_zone
if self._check_zone(zone_name):
exists = True
for azrecord in records(self._resource_group, zone_name):
- if _parse_azure_type(azrecord.type) in self.SUPPORTS:
- _records.append(azrecord)
- for azrecord in _records:
- record_name = azrecord.name if azrecord.name != '@' else ''
typ = _parse_azure_type(azrecord.type)
- data = getattr(self, '_data_for_{}'.format(typ))
- data = data(azrecord)
- data['type'] = typ
- data['ttl'] = azrecord.ttl
- record = Record.new(zone, record_name, data, source=self)
+ if typ not in self.SUPPORTS:
+ continue
+
+ record = self._populate_record(zone, azrecord, lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
+ def _populate_record(self, zone, azrecord, lenient=False):
+ record_name = azrecord.name if azrecord.name != '@' else ''
+ typ = _parse_azure_type(azrecord.type)
+
+ data_for = getattr(self, '_data_for_{}'.format(typ))
+ data = data_for(azrecord)
+ data['type'] = typ
+ data['ttl'] = azrecord.ttl
+ return Record.new(zone, record_name, data, source=self,
+ lenient=lenient)
+
def _data_for_A(self, azrecord):
- return {'values': [ar.ipv4_address for ar in azrecord.arecords]}
+ if azrecord.a_records is None:
+ if azrecord.target_resource.id:
+ return self._data_for_dynamic(azrecord)
+ else:
+ # dynamic record alias is broken, return dummy value and apply
+ # will likely overwrite/fix it
+ self.log.warn('_data_for_A: Missing Traffic Manager '
+ 'alias for dynamic A record %s, forcing '
+ 're-link by setting an invalid value',
+ azrecord.fqdn)
+ return {'values': ['255.255.255.255']}
+
+ return {'values': [ar.ipv4_address for ar in azrecord.a_records]}
def _data_for_AAAA(self, azrecord):
+ if azrecord.aaaa_records is None:
+ if azrecord.target_resource.id:
+ return self._data_for_dynamic(azrecord)
+ else:
+ # dynamic record alias is broken, return dummy value and apply
+ # will likely overwrite/fix it
+ self.log.warn('_data_for_AAAA: Missing Traffic Manager '
+ 'alias for dynamic AAAA record %s, forcing '
+ 're-link by setting an invalid value',
+ azrecord.fqdn)
+ return {'values': ['::1']}
+
return {'values': [ar.ipv6_address for ar in azrecord.aaaa_records]}
def _data_for_CAA(self, azrecord):
@@ -416,14 +684,20 @@ class AzureProvider(BaseProvider):
:type azrecord: azure.mgmt.dns.models.RecordSet
:type return: dict
-
- CNAME and PTR both use the catch block to catch possible empty
- records. Refer to population comment.
'''
- try:
- return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
- except:
- return {'value': '.'}
+ if azrecord.cname_record is None:
+ if azrecord.target_resource.id:
+ return self._data_for_dynamic(azrecord)
+ else:
+ # dynamic record alias is broken, return dummy value and apply
+ # will likely overwrite/fix it
+ self.log.warn('_data_for_CNAME: Missing Traffic Manager '
+ 'alias for dynamic CNAME record %s, forcing '
+ 're-link by setting an invalid value',
+ azrecord.fqdn)
+ return {'value': 'iam.invalid.'}
+
+ return {'value': _check_endswith_dot(azrecord.cname_record.cname)}
def _data_for_MX(self, azrecord):
return {'values': [{'preference': ar.preference,
@@ -435,11 +709,8 @@ class AzureProvider(BaseProvider):
return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_PTR(self, azrecord):
- try:
- ptrdname = azrecord.ptr_records[0].ptrdname
- return {'value': _check_endswith_dot(ptrdname)}
- except:
- return {'value': '.'}
+ vals = [ar.ptrdname for ar in azrecord.ptr_records]
+ return {'values': [_check_endswith_dot(val) for val in vals]}
def _data_for_SRV(self, azrecord):
return {'values': [{'priority': ar.priority, 'weight': ar.weight,
@@ -451,6 +722,479 @@ class AzureProvider(BaseProvider):
ar.value))
for ar in azrecord.txt_records]}
+ def _data_for_dynamic(self, azrecord):
+ default = set()
+ pools = defaultdict(lambda: {'fallback': None, 'values': []})
+ rules = []
+ typ = _parse_azure_type(azrecord.type)
+
+ # top level profile
+ root_profile = self._get_tm_profile_by_id(azrecord.target_resource.id)
+ if root_profile.traffic_routing_method != 'Geographic':
+ # This record does not use geo fencing, so we skip the Geographic
+ # profile hop; let's pretend to be a geo-profile's only endpoint
+ geo_ep = Endpoint(
+ name=root_profile.endpoints[0].name.split('--', 1)[0],
+ target_resource_id=root_profile.id
+ )
+ geo_ep.target_resource = root_profile
+ endpoints = [geo_ep]
+ else:
+ endpoints = root_profile.endpoints
+
+ for geo_ep in endpoints:
+ rule = {}
+
+ # resolve list of regions
+ geo_map = list(geo_ep.geo_mapping or [])
+ if geo_map and geo_map != ['WORLD']:
+ if 'GEO-ME' in geo_map:
+ # Azure treats Middle East as a separate group, but
+ # it's part of Asia in octoDNS, so we need to remove GEO-ME
+ # if GEO-AS is also in the list
+ # Throw exception otherwise, it should not happen if the
+ # profile was generated by octoDNS
+ if 'GEO-AS' not in geo_map:
+ msg = 'Profile={} for record {}: '.format(
+ root_profile.name, azrecord.fqdn)
+ msg += 'Middle East (GEO-ME) is not supported by ' + \
+ 'octoDNS. It needs to be either paired ' + \
+ 'with Asia (GEO-AS) or expanded into ' + \
+ 'individual list of countries.'
+ raise AzureException(msg)
+ geo_map.remove('GEO-ME')
+ geos = rule.setdefault('geos', [])
+ for code in geo_map:
+ if code.startswith('GEO-'):
+ # continent
+ if code == 'GEO-AP':
+ # Azure uses Australia/Pacific (AP) instead of
+ # Oceania https://docs.microsoft.com/en-us/azure/
+ # traffic-manager/
+ # traffic-manager-geographic-regions
+ geos.append('OC')
+ else:
+ geos.append(code[len('GEO-'):])
+ elif '-' in code:
+ # state
+ country, province = code.split('-', 1)
+ country = GeoCodes.country_to_code(country)
+ geos.append('{}-{}'.format(country, province))
+ elif code == 'WORLD':
+ geos.append(code)
+ else:
+ # country
+ geos.append(GeoCodes.country_to_code(code))
+
+ # build fallback chain from second level priority profile
+ if geo_ep.target_resource_id and \
+ geo_ep.target_resource.traffic_routing_method == 'Priority':
+ rule_endpoints = geo_ep.target_resource.endpoints
+ rule_endpoints.sort(key=lambda e: e.priority)
+ else:
+ # this geo directly points to a pool containing the default
+ # so we skip the Priority profile hop and directly use an
+ # external endpoint or Weighted profile
+ # let's pretend to be a Priority profile's only endpoint
+ rule_endpoints = [geo_ep]
+
+ pool = None
+ for rule_ep in rule_endpoints:
+ pool_name = rule_ep.name
+
+ # last/default pool
+ if pool_name.endswith('--default--'):
+ default.add(rule_ep.target)
+ if pool_name == '--default--':
+ # this should be the last one, so let's break here
+ break
+ # last pool is a single value pool and its value is same
+ # as record's default value
+ pool_name = pool_name[:-len('--default--')]
+
+ # set first priority endpoint as the rule's primary pool
+ if 'pool' not in rule:
+ rule['pool'] = pool_name
+
+ if pool:
+ # set current pool as fallback of the previous pool
+ pool['fallback'] = pool_name
+
+ if pool_name in pools:
+ # we've already populated this and subsequent pools
+ break
+
+ # populate the pool from Weighted profile
+ # these should be leaf node entries with no further nesting
+ pool = pools[pool_name]
+ endpoints = []
+
+ if rule_ep.target_resource_id:
+ # third (and last) level weighted RR profile
+ endpoints = rule_ep.target_resource.endpoints
+ else:
+ # single-value pool, so we skip the Weighted profile hop
+ # and directly use an external endpoint; let's pretend to
+ # be a Weighted profile's only endpoint
+ endpoints = [rule_ep]
+
+ for pool_ep in endpoints:
+ val = pool_ep.target
+ if typ == 'CNAME':
+ val = _check_endswith_dot(val)
+ pool['values'].append({
+ 'value': val,
+ 'weight': pool_ep.weight or 1,
+ })
+ if pool_ep.name.endswith('--default--'):
+ default.add(val)
+
+ rules.append(rule)
+
+ # add separate rule for re-used world pool
+ for rule in list(rules):
+ geos = rule.get('geos', [])
+ if len(geos) > 1 and 'WORLD' in geos:
+ geos.remove('WORLD')
+ rules.append({'pool': rule['pool']})
+
+ # Order and convert to a list
+ default = sorted(default)
+
+ data = {
+ 'dynamic': {
+ 'pools': pools,
+ 'rules': rules,
+ },
+ }
+
+ if typ == 'CNAME':
+ data['value'] = _check_endswith_dot(default[0])
+ else:
+ data['values'] = default
+
+ return data
+
+ def _extra_changes(self, existing, desired, changes):
+ changed = set(c.record for c in changes)
+
+ log = self.log.info
+ seen_profiles = {}
+ extra = []
+ for record in desired.records:
+ if not getattr(record, 'dynamic', False):
+ # Already changed, or not dynamic, no need to check it
+ continue
+
+ # Abort if there are unsupported dynamic record configurations
+ _check_valid_dynamic(record)
+
+ # let's walk through and show what will be changed even if
+ # the record is already in list of changes
+ added = (record in changed)
+
+ active = set()
+ profiles = self._generate_traffic_managers(record)
+
+ for profile in profiles:
+ name = profile.name
+
+ endpoints = set()
+ for ep in profile.endpoints:
+ if not ep.target:
+ continue
+ if ep.target in endpoints:
+ msg = '{} contains duplicate endpoint {}'
+ raise AzureException(msg.format(name, ep.target))
+ endpoints.add(ep.target)
+
+ if name in seen_profiles:
+ # exit if a possible collision is detected, even though
+ # we've tried to ensure unique mapping
+ msg = 'Collision in Traffic Manager names detected'
+ msg = '{}: {} and {} both want to use {}'.format(
+ msg, seen_profiles[name], record.fqdn, name)
+ raise AzureException(msg)
+ else:
+ seen_profiles[name] = record.fqdn
+
+ active.add(name)
+ existing_profile = self._get_tm_profile_by_name(name)
+ if not _profile_is_match(existing_profile, profile):
+ log('_extra_changes: Profile name=%s will be synced',
+ name)
+ if not added:
+ extra.append(Update(record, record))
+ added = True
+
+ existing_profiles = self._find_traffic_managers(record)
+ for name in existing_profiles - active:
+ log('_extra_changes: Profile name=%s will be destroyed', name)
+ if not added:
+ extra.append(Update(record, record))
+ added = True
+
+ return extra
+
+ def _generate_tm_profile(self, routing, endpoints, record, label=None):
+ # figure out profile name and Traffic Manager FQDN
+ name = _root_traffic_manager_name(record)
+ if routing == 'Weighted' and label:
+ name = _pool_traffic_manager_name(label, record)
+ elif routing == 'Priority' and label:
+ name = _rule_traffic_manager_name(label, record)
+
+ # set appropriate endpoint types
+ endpoint_type_prefix = 'Microsoft.Network/trafficManagerProfiles/'
+ for ep in endpoints:
+ if ep.target_resource_id:
+ ep.type = endpoint_type_prefix + 'nestedEndpoints'
+ elif ep.target:
+ ep.type = endpoint_type_prefix + 'externalEndpoints'
+ else:
+ msg = ('Invalid endpoint {} in profile {}, needs to have ' +
+ 'either target or target_resource_id').format(
+ ep.name, name)
+ raise AzureException(msg)
+
+ # build and return
+ return Profile(
+ id=self._profile_name_to_id(name),
+ name=name,
+ traffic_routing_method=routing,
+ dns_config=DnsConfig(
+ relative_name=name.lower(),
+ ttl=record.ttl,
+ ),
+ monitor_config=_get_monitor(record),
+ endpoints=endpoints,
+ location='global',
+ )
+
+ def _convert_tm_to_root(self, profile, record):
+ profile.name = _root_traffic_manager_name(record)
+ profile.id = self._profile_name_to_id(profile.name)
+ profile.dns_config.relative_name = profile.name.lower()
+
+ return profile
+
+ def _generate_traffic_managers(self, record):
+ traffic_managers = []
+ pools = record.dynamic.pools
+ rules = record.dynamic.rules
+ typ = record._type
+
+ if typ == 'CNAME':
+ defaults = [record.value[:-1]]
+ else:
+ defaults = record.values
+ profile = self._generate_tm_profile
+
+ # a pool can be re-used only with a world pool, record the pool
+ # to later consolidate it with a geo pool if one exists since we
+ # can't have multiple endpoints with the same target in ATM
+ world_pool = None
+ for rule in rules:
+ if not rule.data.get('geos', []):
+ world_pool = rule.data['pool']
+ world_seen = False
+
+ geo_endpoints = []
+ pool_profiles = {}
+
+ for rule in record.dynamic.rules:
+ pool_name = rule.data['pool']
+ if pool_name == world_pool and world_seen:
+ # this world pool is already mentioned in another geo rule
+ continue
+
+ # Prepare the list of Traffic manager geos
+ rule_geos = rule.data.get('geos', [])
+ geos = []
+ for geo in rule_geos:
+ if '-' in geo:
+ # country/state
+ geos.append(geo.split('-', 1)[-1])
+ else:
+ # continent
+ if geo == 'AS':
+ # Middle East is part of Asia in octoDNS, but
+ # Azure treats it as a separate "group", so let's
+ # add it in the list of geo mappings. We will drop
+ # it when we later parse the list of regions.
+ geos.append('GEO-ME')
+ elif geo == 'OC':
+ # Azure uses Australia/Pacific (AP) instead of
+ # Oceania
+ geo = 'AP'
+
+ geos.append('GEO-{}'.format(geo))
+ if not geos or pool_name == world_pool:
+ geos.append('WORLD')
+ world_seen = True
+
+ rule_endpoints = []
+ priority = 1
+ default_seen = False
+
+ while pool_name:
+ # iterate until we reach end of fallback chain
+ pool = pools[pool_name].data
+ if len(pool['values']) > 1:
+ # create Weighted profile for multi-value pool
+ pool_profile = pool_profiles.get(pool_name)
+ if pool_profile is None:
+ endpoints = []
+ for val in pool['values']:
+ target = val['value']
+ # strip trailing dot from CNAME value
+ if typ == 'CNAME':
+ target = target[:-1]
+ ep_name = '{}--{}'.format(pool_name, target)
+ # Endpoint names cannot have colons, drop them
+ # from IPv6 addresses
+ ep_name = ep_name.replace(':', '-')
+ if target in defaults:
+ # mark default
+ ep_name += '--default--'
+ default_seen = True
+ endpoints.append(Endpoint(
+ name=ep_name,
+ target=target,
+ weight=val.get('weight', 1),
+ ))
+ pool_profile = profile(
+ 'Weighted', endpoints, record, pool_name)
+ traffic_managers.append(pool_profile)
+ pool_profiles[pool_name] = pool_profile
+
+ # append pool to endpoint list of fallback rule profile
+ rule_endpoints.append(Endpoint(
+ name=pool_name,
+ target_resource_id=pool_profile.id,
+ priority=priority,
+ ))
+ else:
+ # Skip Weighted profile hop for single-value pool
+ # append its value as an external endpoint to fallback
+ # rule profile
+ target = pool['values'][0]['value']
+ if typ == 'CNAME':
+ target = target[:-1]
+ ep_name = pool_name
+ if target in defaults:
+ # mark default
+ ep_name += '--default--'
+ default_seen = True
+ rule_endpoints.append(Endpoint(
+ name=ep_name,
+ target=target,
+ priority=priority,
+ ))
+
+ priority += 1
+ pool_name = pool.get('fallback')
+
+ # append default endpoint unless it is already included in
+ # last pool of rule profile
+ if not default_seen:
+ rule_endpoints.append(Endpoint(
+ name='--default--',
+ target=defaults[0],
+ priority=priority,
+ ))
+
+ if len(rule_endpoints) > 1:
+ # create rule profile with fallback chain
+ rule_profile = profile(
+ 'Priority', rule_endpoints, record, rule.data['pool'])
+ traffic_managers.append(rule_profile)
+
+ # append rule profile to top-level geo profile
+ geo_endpoints.append(Endpoint(
+ name=rule.data['pool'],
+ target_resource_id=rule_profile.id,
+ geo_mapping=geos,
+ ))
+ else:
+ # Priority profile has only one endpoint; skip the hop and
+ # append its only endpoint to the top-level profile
+ rule_ep = rule_endpoints[0]
+ if rule_ep.target_resource_id:
+ # point directly to the Weighted pool profile
+ geo_endpoints.append(Endpoint(
+ name=rule_ep.name,
+ target_resource_id=rule_ep.target_resource_id,
+ geo_mapping=geos,
+ ))
+ else:
+ # just add the value of single-value pool
+ geo_endpoints.append(Endpoint(
+ name=rule_ep.name,
+ target=rule_ep.target,
+ geo_mapping=geos,
+ ))
+
+ if len(geo_endpoints) == 1 and \
+ geo_endpoints[0].geo_mapping == ['WORLD'] and \
+ geo_endpoints[0].target_resource_id:
+ # Single WORLD rule does not require a Geographic profile, use
+ # the target profile as the root profile
+ self._convert_tm_to_root(traffic_managers[-1], record)
+ else:
+ geo_profile = profile('Geographic', geo_endpoints, record)
+ traffic_managers.append(geo_profile)
+
+ return traffic_managers
+
+ def _sync_traffic_managers(self, desired_profiles):
+ seen = set()
+
+ tm_sync = self._tm_client.profiles.create_or_update
+ populate = self._populate_nested_profiles
+
+ for desired in desired_profiles:
+ name = desired.name
+ if name in seen:
+ continue
+
+ existing = self._get_tm_profile_by_name(name)
+ if not _profile_is_match(existing, desired):
+ self.log.info(
+ '_sync_traffic_managers: Syncing profile=%s', name)
+ profile = tm_sync(self._resource_group, name, desired)
+ self._traffic_managers[profile.id] = populate(profile)
+ else:
+ self.log.debug(
+ '_sync_traffic_managers: Skipping profile=%s: up to date',
+ name)
+ seen.add(name)
+
+ return seen
+
+ def _find_traffic_managers(self, record):
+ tm_prefix = _root_traffic_manager_name(record)
+
+ profiles = set()
+ for profile_id in self._traffic_managers:
+ # match existing profiles with record's prefix
+ name = profile_id.split('/')[-1]
+ if name == tm_prefix or \
+ name.startswith('{}-pool-'.format(tm_prefix)) or \
+ name.startswith('{}-rule-'.format(tm_prefix)):
+ profiles.add(name)
+
+ return profiles
+
+ def _traffic_managers_gc(self, record, active_profiles):
+ existing_profiles = self._find_traffic_managers(record)
+
+ # delete unused profiles
+ for profile_name in existing_profiles - active_profiles:
+ self.log.info('_traffic_managers_gc: Deleting profile=%s',
+ profile_name)
+ self._tm_client.profiles.delete(self._resource_group, profile_name)
+
def _apply_Create(self, change):
'''A record from change must be created.
@@ -459,7 +1203,25 @@ class AzureProvider(BaseProvider):
:type return: void
'''
- ar = _AzureRecord(self._resource_group, change.new)
+ record = change.new
+
+ dynamic = getattr(record, 'dynamic', False)
+ root_profile = None
+ endpoints = []
+ if dynamic:
+ profiles = self._generate_traffic_managers(record)
+ root_profile = profiles[-1]
+ if record._type in ['A', 'AAAA'] and len(profiles) > 1:
+ # A/AAAA records cannot be aliased to Traffic Managers that
+ # contain other nested Traffic Managers. To work around this
+ # limitation, we remove nesting before adding the record, and
+ # then add the nested endpoints later.
+ endpoints = root_profile.endpoints
+ root_profile.endpoints = []
+ self._sync_traffic_managers(profiles)
+
+ ar = _AzureRecord(self._resource_group, record,
+ traffic_manager=root_profile)
create = self._dns_client.record_sets.create_or_update
create(resource_group_name=ar.resource_group,
@@ -468,18 +1230,97 @@ class AzureProvider(BaseProvider):
record_type=ar.record_type,
parameters=ar.params)
- self.log.debug('* Success Create/Update: {}'.format(ar))
+ if endpoints:
+ # add nested endpoints for A/AAAA dynamic record limitation after
+ # record creation
+ root_profile.endpoints = endpoints
+ self._sync_traffic_managers([root_profile])
+
+ self.log.debug('* Success Create: {}'.format(record))
+
+ def _apply_Update(self, change):
+ '''A record from change must be updated.
- _apply_Update = _apply_Create
+ :param change: a change object
+ :type change: octodns.record.Change
+
+ :type return: void
+ '''
+ existing = change.existing
+ new = change.new
+ existing_is_dynamic = getattr(existing, 'dynamic', False)
+ new_is_dynamic = getattr(new, 'dynamic', False)
+
+ update_record = True
+
+ if new_is_dynamic:
+ endpoints = []
+ profiles = self._generate_traffic_managers(new)
+ root_profile = profiles[-1]
+
+ if new._type in ['A', 'AAAA']:
+ if existing_is_dynamic:
+ # update to the record is not needed
+ update_record = False
+ elif len(profiles) > 1:
+ # record needs to be aliased; remove nested endpoints, we
+ # will add them at the end
+ endpoints = root_profile.endpoints
+ root_profile.endpoints = []
+ elif existing.ttl == new.ttl and existing_is_dynamic:
+ # CNAME dynamic records only have TTL in them, everything else
+ # goes inside the aliased traffic managers; skip update if TTL
+ # is unchanged and existing record is already aliased to its
+ # traffic manager
+ update_record = False
+
+ active = self._sync_traffic_managers(profiles)
+
+ if update_record:
+ profile = self._get_tm_for_dynamic_record(new)
+ ar = _AzureRecord(self._resource_group, new,
+ traffic_manager=profile)
+ update = self._dns_client.record_sets.create_or_update
+
+ update(resource_group_name=ar.resource_group,
+ zone_name=ar.zone_name,
+ relative_record_set_name=ar.relative_record_set_name,
+ record_type=ar.record_type,
+ parameters=ar.params)
+
+ if new_is_dynamic:
+ # add any pending nested endpoints
+ if endpoints:
+ root_profile.endpoints = endpoints
+ self._sync_traffic_managers([root_profile])
+ # let's cleanup unused traffic managers
+ self._traffic_managers_gc(new, active)
+ elif existing_is_dynamic:
+ # cleanup traffic managers when a dynamic record gets
+ # changed to a simple record
+ self._traffic_managers_gc(existing, set())
+
+ self.log.debug('* Success Update: {}'.format(new))
def _apply_Delete(self, change):
- ar = _AzureRecord(self._resource_group, change.existing, delete=True)
+ '''A record from change must be deleted.
+
+ :param change: a change object
+ :type change: octodns.record.Change
+
+ :type return: void
+ '''
+ record = change.record
+ ar = _AzureRecord(self._resource_group, record, delete=True)
delete = self._dns_client.record_sets.delete
delete(self._resource_group, ar.zone_name, ar.relative_record_set_name,
ar.record_type)
- self.log.debug('* Success Delete: {}'.format(ar))
+ if getattr(record, 'dynamic', False):
+ self._traffic_managers_gc(record, set())
+
+ self.log.debug('* Success Delete: {}'.format(record))
def _apply(self, plan):
'''Required function of manager.py to actually apply a record change.
@@ -497,6 +1338,19 @@ class AzureProvider(BaseProvider):
azure_zone_name = desired.name[:len(desired.name) - 1]
self._check_zone(azure_zone_name, create=True)
+ '''
+ Force the operation order to be Delete() before all other operations.
+ Helps avoid problems in updating
+ - a CNAME record into an A record.
+ - an A record into a CNAME record.
+ '''
+
+ for change in changes:
+ class_name = change.__class__.__name__
+ if class_name == 'Delete':
+ self._apply_Delete(change)
+
for change in changes:
class_name = change.__class__.__name__
- getattr(self, '_apply_{}'.format(class_name))(change)
+ if class_name != 'Delete':
+ getattr(self, '_apply_{}'.format(class_name))(change)
diff --git a/octodns/provider/base.py b/octodns/provider/base.py
index ae87844..b636d65 100644
--- a/octodns/provider/base.py
+++ b/octodns/provider/base.py
@@ -10,13 +10,15 @@ from six import text_type
from ..source.base import BaseSource
from ..zone import Zone
from .plan import Plan
+from . import SupportsException
class BaseProvider(BaseSource):
def __init__(self, id, apply_disabled=False,
update_pcent_threshold=Plan.MAX_SAFE_UPDATE_PCENT,
- delete_pcent_threshold=Plan.MAX_SAFE_DELETE_PCENT):
+ delete_pcent_threshold=Plan.MAX_SAFE_DELETE_PCENT,
+ strict_supports=False):
super(BaseProvider, self).__init__(id)
self.log.debug('__init__: id=%s, apply_disabled=%s, '
'update_pcent_threshold=%.2f, '
@@ -28,6 +30,43 @@ class BaseProvider(BaseSource):
self.apply_disabled = apply_disabled
self.update_pcent_threshold = update_pcent_threshold
self.delete_pcent_threshold = delete_pcent_threshold
+ self.strict_supports = strict_supports
+
+ def _process_desired_zone(self, desired):
+ '''
+ An opportunity for providers to modify the desired zone records before
+ planning. `desired` is a "shallow" copy, see `Zone.copy` for more
+ information
+
+ - Must call `super` at an appropriate point for their work, generally
+ that means as the final step of the method, returning the result of
+ the `super` call.
+ - May modify `desired` directly.
+ - Must not modify records directly, `record.copy` should be called,
+ the results of which can be modified, and then `Zone.add_record` may
+ be used with `replace=True`.
+ - May call `Zone.remove_record` to remove records from `desired`.
+ - Must call supports_warn_or_except with information about any changes
+ that are made to have them logged or throw errors depending on the
+ provider configuration.
+ '''
+ if self.SUPPORTS_MUTLIVALUE_PTR:
+ # nothing to do here
+ return desired
+
+ for record in desired.records:
+ if record._type == 'PTR' and len(record.values) > 1:
+ # replace with a single-value copy
+ msg = 'multi-value PTR records not supported for {}' \
+ .format(record.fqdn)
+ fallback = 'falling back to single value, {}' \
+ .format(record.value)
+ self.supports_warn_or_except(msg, fallback)
+ record = record.copy()
+ record.values = [record.value]
+ desired.add_record(record, replace=True)
+
+ return desired
def _include_change(self, change):
'''
@@ -44,9 +83,21 @@ class BaseProvider(BaseSource):
'''
return []
- def plan(self, desired):
+ def supports_warn_or_except(self, msg, fallback):
+ if self.strict_supports:
+ raise SupportsException('{}: {}'.format(self.id, msg))
+ self.log.warning('{}; {}'.format(msg, fallback))
+
+ def plan(self, desired, processors=[]):
self.log.info('plan: desired=%s', desired.name)
+ # Make a (shallow) copy of the desired state so that everything from
+ # now on (in this target) can modify it as they see fit without
+ # worrying about impacting other targets.
+ desired = desired.copy()
+
+ desired = self._process_desired_zone(desired)
+
existing = Zone(desired.name, desired.sub_zones)
exists = self.populate(existing, target=True, lenient=True)
if exists is None:
@@ -55,6 +106,9 @@ class BaseProvider(BaseSource):
self.log.warn('Provider %s used in target mode did not return '
'exists', self.id)
+ for processor in processors:
+ existing = processor.process_target_zone(existing, target=self)
+
# compute the changes at the zone/record level
changes = existing.changes(desired, self)
@@ -91,7 +145,10 @@ class BaseProvider(BaseSource):
self.log.info('apply: disabled')
return 0
- self.log.info('apply: making changes')
+ zone_name = plan.desired.name
+ num_changes = len(plan.changes)
+ self.log.info('apply: making %d changes to %s', num_changes,
+ zone_name)
self._apply(plan)
return len(plan.changes)
diff --git a/octodns/provider/cloudflare.py b/octodns/provider/cloudflare.py
index db937e5..a774acc 100644
--- a/octodns/provider/cloudflare.py
+++ b/octodns/provider/cloudflare.py
@@ -10,12 +10,14 @@ from copy import deepcopy
from logging import getLogger
from requests import Session
from time import sleep
+from urllib.parse import urlsplit
from ..record import Record, Update
+from . import ProviderException
from .base import BaseProvider
-class CloudflareError(Exception):
+class CloudflareError(ProviderException):
def __init__(self, data):
try:
message = data['errors'][0]['message']
@@ -75,8 +77,8 @@ class CloudflareProvider(BaseProvider):
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
- SUPPORTS = set(('ALIAS', 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR',
- 'SRV', 'SPF', 'TXT'))
+ SUPPORTS = set(('ALIAS', 'A', 'AAAA', 'CAA', 'CNAME', 'LOC', 'MX', 'NS',
+ 'PTR', 'SRV', 'SPF', 'TXT', 'URLFWD'))
MIN_TTL = 120
TIMEOUT = 15
@@ -133,6 +135,7 @@ class CloudflareProvider(BaseProvider):
timeout=self.TIMEOUT)
self.log.debug('_request: status=%d', resp.status_code)
if resp.status_code == 400:
+ self.log.debug('_request: data=%s', data)
raise CloudflareError(resp.json())
if resp.status_code == 403:
raise CloudflareAuthenticationError(resp.json())
@@ -142,6 +145,11 @@ class CloudflareProvider(BaseProvider):
resp.raise_for_status()
return resp.json()
+ def _change_keyer(self, change):
+ key = change.__class__.__name__
+ order = {'Delete': 0, 'Create': 1, 'Update': 2}
+ return order[key]
+
@property
def zones(self):
if self._zones is None:
@@ -164,6 +172,9 @@ class CloudflareProvider(BaseProvider):
return self._zones
+ def _ttl_data(self, ttl):
+ return 300 if ttl == 1 else ttl
+
def _data_for_cdn(self, name, _type, records):
self.log.info('CDN rewrite for %s', records[0]['name'])
_type = "CNAME"
@@ -171,14 +182,14 @@ class CloudflareProvider(BaseProvider):
_type = "ALIAS"
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'value': '{}.cdn.cloudflare.net.'.format(records[0]['name']),
}
def _data_for_multiple(self, _type, records):
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': [r['content'] for r in records],
}
@@ -189,7 +200,7 @@ class CloudflareProvider(BaseProvider):
def _data_for_TXT(self, _type, records):
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': [r['content'].replace(';', '\\;') for r in records],
}
@@ -200,7 +211,7 @@ class CloudflareProvider(BaseProvider):
data = r['data']
values.append(data)
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': values,
}
@@ -208,7 +219,7 @@ class CloudflareProvider(BaseProvider):
def _data_for_CNAME(self, _type, records):
only = records[0]
return {
- 'ttl': only['ttl'],
+ 'ttl': self._ttl_data(only['ttl']),
'type': _type,
'value': '{}.'.format(only['content'])
}
@@ -216,6 +227,30 @@ class CloudflareProvider(BaseProvider):
_data_for_ALIAS = _data_for_CNAME
_data_for_PTR = _data_for_CNAME
+ def _data_for_LOC(self, _type, records):
+ values = []
+ for record in records:
+ r = record['data']
+ values.append({
+ 'lat_degrees': int(r['lat_degrees']),
+ 'lat_minutes': int(r['lat_minutes']),
+ 'lat_seconds': float(r['lat_seconds']),
+ 'lat_direction': r['lat_direction'],
+ 'long_degrees': int(r['long_degrees']),
+ 'long_minutes': int(r['long_minutes']),
+ 'long_seconds': float(r['long_seconds']),
+ 'long_direction': r['long_direction'],
+ 'altitude': float(r['altitude']),
+ 'size': float(r['size']),
+ 'precision_horz': float(r['precision_horz']),
+ 'precision_vert': float(r['precision_vert']),
+ })
+ return {
+ 'ttl': self._ttl_data(records[0]['ttl']),
+ 'type': _type,
+ 'values': values
+ }
+
def _data_for_MX(self, _type, records):
values = []
for r in records:
@@ -224,14 +259,14 @@ class CloudflareProvider(BaseProvider):
'exchange': '{}.'.format(r['content']),
})
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': values,
}
def _data_for_NS(self, _type, records):
return {
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
'type': _type,
'values': ['{}.'.format(r['content']) for r in records],
}
@@ -239,15 +274,33 @@ class CloudflareProvider(BaseProvider):
def _data_for_SRV(self, _type, records):
values = []
for r in records:
+ target = ('{}.'.format(r['data']['target'])
+ if r['data']['target'] != "." else ".")
values.append({
'priority': r['data']['priority'],
'weight': r['data']['weight'],
'port': r['data']['port'],
- 'target': '{}.'.format(r['data']['target']),
+ 'target': target,
})
return {
'type': _type,
- 'ttl': records[0]['ttl'],
+ 'ttl': self._ttl_data(records[0]['ttl']),
+ 'values': values
+ }
+
+ def _data_for_URLFWD(self, _type, records):
+ values = []
+ for r in records:
+ values.append({
+ 'path': r['path'],
+ 'target': r['url'],
+ 'code': r['status_code'],
+ 'masking': 2,
+ 'query': 0,
+ })
+ return {
+ 'type': _type,
+ 'ttl': 300, # ttl does not exist for this type, forcing a setting
'values': values
}
@@ -270,6 +323,13 @@ class CloudflareProvider(BaseProvider):
else:
page = None
+ path = '/zones/{}/pagerules'.format(zone_id)
+ resp = self._try_request('GET', path, params={'status': 'active'})
+ for r in resp['result']:
+            # assumption, based on API guide, will only contain 1 action
+ if r['actions'][0]['id'] == 'forwarding_url':
+ records += [r]
+
self._zone_records[zone.name] = records
return self._zone_records[zone.name]
@@ -306,10 +366,29 @@ class CloudflareProvider(BaseProvider):
exists = True
values = defaultdict(lambda: defaultdict(list))
for record in records:
- name = zone.hostname_from_fqdn(record['name'])
- _type = record['type']
- if _type in self.SUPPORTS:
- values[name][record['type']].append(record)
+ if 'targets' in record:
+ # assumption, targets will always contain 1 target
+ # API documentation only indicates 'url' as the only target
+ # if record['targets'][0]['target'] == 'url':
+ uri = record['targets'][0]['constraint']['value']
+ uri = '//' + uri if not uri.startswith('http') else uri
+ parsed_uri = urlsplit(uri)
+ name = zone.hostname_from_fqdn(parsed_uri.netloc)
+ path = parsed_uri.path
+ _type = 'URLFWD'
+ # assumption, actions will always contain 1 action
+ _values = record['actions'][0]['value']
+ _values['path'] = path
+ # no ttl set by pagerule, creating one
+ _values['ttl'] = 300
+ values[name][_type].append(_values)
+ # the dns_records branch
+ # elif 'name' in record:
+ else:
+ name = zone.hostname_from_fqdn(record['name'])
+ _type = record['type']
+ if _type in self.SUPPORTS:
+ values[name][record['type']].append(record)
for name, types in values.items():
for _type, records in types.items():
@@ -341,6 +420,11 @@ class CloudflareProvider(BaseProvider):
existing.update({
'ttl': new['ttl']
})
+ elif change.new._type == 'URLFWD':
+ existing = deepcopy(change.existing.data)
+ existing.update({
+ 'ttl': new['ttl']
+ })
else:
existing = change.existing.data
@@ -384,6 +468,25 @@ class CloudflareProvider(BaseProvider):
_contents_for_PTR = _contents_for_CNAME
+ def _contents_for_LOC(self, record):
+ for value in record.values:
+ yield {
+ 'data': {
+ 'lat_degrees': value.lat_degrees,
+ 'lat_minutes': value.lat_minutes,
+ 'lat_seconds': value.lat_seconds,
+ 'lat_direction': value.lat_direction,
+ 'long_degrees': value.long_degrees,
+ 'long_minutes': value.long_minutes,
+ 'long_seconds': value.long_seconds,
+ 'long_direction': value.long_direction,
+ 'altitude': value.altitude,
+ 'size': value.size,
+ 'precision_horz': value.precision_horz,
+ 'precision_vert': value.precision_vert,
+ }
+ }
+
def _contents_for_MX(self, record):
for value in record.values:
yield {
@@ -405,6 +508,8 @@ class CloudflareProvider(BaseProvider):
name = subdomain
for value in record.values:
+ target = value.target[:-1] if value.target != "." else "."
+
yield {
'data': {
'service': service,
@@ -413,10 +518,35 @@ class CloudflareProvider(BaseProvider):
'priority': value.priority,
'weight': value.weight,
'port': value.port,
- 'target': value.target[:-1],
+ 'target': target,
}
}
+ def _contents_for_URLFWD(self, record):
+ name = record.fqdn[:-1]
+ for value in record.values:
+ yield {
+ 'targets': [
+ {
+ 'target': 'url',
+ 'constraint': {
+ 'operator': 'matches',
+ 'value': name + value.path
+ }
+ }
+ ],
+ 'actions': [
+ {
+ 'id': 'forwarding_url',
+ 'value': {
+ 'url': value.target,
+ 'status_code': value.code,
+ }
+ }
+ ],
+ 'status': 'active',
+ }
+
def _record_is_proxied(self, record):
return (
not self.cdn and
@@ -432,20 +562,25 @@ class CloudflareProvider(BaseProvider):
if _type == 'ALIAS':
_type = 'CNAME'
- contents_for = getattr(self, '_contents_for_{}'.format(_type))
- for content in contents_for(record):
- content.update({
- 'name': name,
- 'type': _type,
- 'ttl': ttl,
- })
-
- if _type in _PROXIABLE_RECORD_TYPES:
+ if _type == 'URLFWD':
+ contents_for = getattr(self, '_contents_for_{}'.format(_type))
+ for content in contents_for(record):
+ yield content
+ else:
+ contents_for = getattr(self, '_contents_for_{}'.format(_type))
+ for content in contents_for(record):
content.update({
- 'proxied': self._record_is_proxied(record)
+ 'name': name,
+ 'type': _type,
+ 'ttl': ttl,
})
- yield content
+ if _type in _PROXIABLE_RECORD_TYPES:
+ content.update({
+ 'proxied': self._record_is_proxied(record)
+ })
+
+ yield content
def _gen_key(self, data):
# Note that most CF record data has a `content` field the value of
@@ -456,10 +591,11 @@ class CloudflareProvider(BaseProvider):
# new records cleanly. In general when there are multiple records for a
# name & type each will have a distinct/consistent `content` that can
# serve as a unique identifier.
- # BUT... there are exceptions. MX, CAA, and SRV don't have a simple
+ # BUT... there are exceptions. MX, CAA, LOC and SRV don't have a simple
# content as things are currently implemented so we need to handle
# those explicitly and create unique/hashable strings for them.
- _type = data['type']
+ # AND... for URLFWD/Redirects additional adventures are created.
+ _type = data.get('type', 'URLFWD')
if _type == 'MX':
return '{priority} {content}'.format(**data)
elif _type == 'CAA':
@@ -468,12 +604,39 @@ class CloudflareProvider(BaseProvider):
elif _type == 'SRV':
data = data['data']
return '{port} {priority} {target} {weight}'.format(**data)
+ elif _type == 'LOC':
+ data = data['data']
+ loc = (
+ '{lat_degrees}',
+ '{lat_minutes}',
+ '{lat_seconds}',
+ '{lat_direction}',
+ '{long_degrees}',
+ '{long_minutes}',
+ '{long_seconds}',
+ '{long_direction}',
+ '{altitude}',
+ '{size}',
+ '{precision_horz}',
+ '{precision_vert}')
+ return ' '.join(loc).format(**data)
+ elif _type == 'URLFWD':
+ uri = data['targets'][0]['constraint']['value']
+ uri = '//' + uri if not uri.startswith('http') else uri
+ parsed_uri = urlsplit(uri)
+ return '{name} {path} {url} {status_code}' \
+ .format(name=parsed_uri.netloc,
+ path=parsed_uri.path,
+ **data['actions'][0]['value'])
return data['content']
def _apply_Create(self, change):
new = change.new
zone_id = self.zones[new.zone.name]
- path = '/zones/{}/dns_records'.format(zone_id)
+ if new._type == 'URLFWD':
+ path = '/zones/{}/pagerules'.format(zone_id)
+ else:
+ path = '/zones/{}/dns_records'.format(zone_id)
for content in self._gen_data(new):
self._try_request('POST', path, data=content)
@@ -486,14 +649,27 @@ class CloudflareProvider(BaseProvider):
existing = {}
# Find all of the existing CF records for this name & type
for record in self.zone_records(zone):
- name = zone.hostname_from_fqdn(record['name'])
+ if 'targets' in record:
+ uri = record['targets'][0]['constraint']['value']
+ uri = '//' + uri if not uri.startswith('http') else uri
+ parsed_uri = urlsplit(uri)
+ name = zone.hostname_from_fqdn(parsed_uri.netloc)
+ path = parsed_uri.path
+ # assumption, actions will always contain 1 action
+ _values = record['actions'][0]['value']
+ _values['path'] = path
+ _values['ttl'] = 300
+ _values['type'] = 'URLFWD'
+ record.update(_values)
+ else:
+ name = zone.hostname_from_fqdn(record['name'])
# Use the _record_for so that we include all of standard
# conversion logic
r = self._record_for(zone, name, record['type'], [record], True)
if hostname == r.name and _type == r._type:
- # Round trip the single value through a record to contents flow
- # to get a consistent _gen_data result that matches what
- # went in to new_contents
+ # Round trip the single value through a record to contents
+ # flow to get a consistent _gen_data result that matches
+ # what went in to new_contents
data = next(self._gen_data(r))
# Record the record_id and data for this existing record
@@ -561,7 +737,10 @@ class CloudflareProvider(BaseProvider):
# otherwise required, just makes things deterministic
# Creates
- path = '/zones/{}/dns_records'.format(zone_id)
+ if _type == 'URLFWD':
+ path = '/zones/{}/pagerules'.format(zone_id)
+ else:
+ path = '/zones/{}/dns_records'.format(zone_id)
for _, data in sorted(creates.items()):
self.log.debug('_apply_Update: creating %s', data)
self._try_request('POST', path, data=data)
@@ -571,7 +750,10 @@ class CloudflareProvider(BaseProvider):
record_id = info['record_id']
data = info['data']
old_data = info['old_data']
- path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
+ if _type == 'URLFWD':
+ path = '/zones/{}/pagerules/{}'.format(zone_id, record_id)
+ else:
+ path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
self.log.debug('_apply_Update: updating %s, %s -> %s',
record_id, data, old_data)
self._try_request('PUT', path, data=data)
@@ -580,7 +762,10 @@ class CloudflareProvider(BaseProvider):
for _, info in sorted(deletes.items()):
record_id = info['record_id']
old_data = info['data']
- path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
+ if _type == 'URLFWD':
+ path = '/zones/{}/pagerules/{}'.format(zone_id, record_id)
+ else:
+ path = '/zones/{}/dns_records/{}'.format(zone_id, record_id)
self.log.debug('_apply_Update: removing %s, %s', record_id,
old_data)
self._try_request('DELETE', path)
@@ -592,11 +777,24 @@ class CloudflareProvider(BaseProvider):
existing_type = 'CNAME' if existing._type == 'ALIAS' \
else existing._type
for record in self.zone_records(existing.zone):
- if existing_name == record['name'] and \
- existing_type == record['type']:
- path = '/zones/{}/dns_records/{}'.format(record['zone_id'],
- record['id'])
- self._try_request('DELETE', path)
+ if 'targets' in record:
+ uri = record['targets'][0]['constraint']['value']
+ uri = '//' + uri if not uri.startswith('http') else uri
+ parsed_uri = urlsplit(uri)
+ record_name = parsed_uri.netloc
+ record_type = 'URLFWD'
+ zone_id = self.zones.get(existing.zone.name, False)
+ if existing_name == record_name and \
+ existing_type == record_type:
+ path = '/zones/{}/pagerules/{}' \
+ .format(zone_id, record['id'])
+ self._try_request('DELETE', path)
+ else:
+ if existing_name == record['name'] and \
+ existing_type == record['type']:
+ path = '/zones/{}/dns_records/{}' \
+ .format(record['zone_id'], record['id'])
+ self._try_request('DELETE', path)
def _apply(self, plan):
desired = plan.desired
@@ -616,6 +814,11 @@ class CloudflareProvider(BaseProvider):
self.zones[name] = zone_id
self._zone_records[name] = {}
+ # Force the operation order to be Delete() -> Create() -> Update()
+ # This will help avoid problems in updating a CNAME record into an
+ # A record and vice-versa
+ changes.sort(key=self._change_keyer)
+
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
diff --git a/octodns/provider/constellix.py b/octodns/provider/constellix.py
index 67f7a6d..cc65ae1 100644
--- a/octodns/provider/constellix.py
+++ b/octodns/provider/constellix.py
@@ -8,7 +8,6 @@ from __future__ import absolute_import, division, print_function, \
from collections import defaultdict
from requests import Session
from base64 import b64encode
-from ipaddress import ip_address
from six import string_types
import hashlib
import hmac
@@ -16,10 +15,11 @@ import logging
import time
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class ConstellixClientException(Exception):
+class ConstellixClientException(ProviderException):
pass
@@ -139,11 +139,6 @@ class ConstellixClient(object):
v['value'] = self._absolutize_value(v['value'],
zone_name)
- # compress IPv6 addresses
- if record['type'] == 'AAAA':
- for i, v in enumerate(value):
- value[i] = str(ip_address(v))
-
return resp
def record_create(self, zone_name, record_type, params):
diff --git a/octodns/provider/digitalocean.py b/octodns/provider/digitalocean.py
index e192543..fe31754 100644
--- a/octodns/provider/digitalocean.py
+++ b/octodns/provider/digitalocean.py
@@ -10,10 +10,11 @@ from requests import Session
import logging
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class DigitalOceanClientException(Exception):
+class DigitalOceanClientException(ProviderException):
pass
@@ -186,10 +187,14 @@ class DigitalOceanProvider(BaseProvider):
def _data_for_SRV(self, _type, records):
values = []
for record in records:
+ target = (
+ '{}.'.format(record['data'])
+ if record['data'] != "." else "."
+ )
values.append({
'port': record['port'],
'priority': record['priority'],
- 'target': '{}.'.format(record['data']),
+ 'target': target,
'weight': record['weight']
})
return {
diff --git a/octodns/provider/dnsimple.py b/octodns/provider/dnsimple.py
index f83098e..009e829 100644
--- a/octodns/provider/dnsimple.py
+++ b/octodns/provider/dnsimple.py
@@ -10,10 +10,11 @@ from requests import Session
import logging
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class DnsimpleClientException(Exception):
+class DnsimpleClientException(ProviderException):
pass
@@ -51,8 +52,8 @@ class DnsimpleClient(object):
resp.raise_for_status()
return resp
- def domain(self, name):
- path = '/domains/{}'.format(name)
+ def zone(self, name):
+ path = '/zones/{}'.format(name)
return self._request('GET', path).json()
def domain_create(self, name):
@@ -218,12 +219,23 @@ class DnsimpleProvider(BaseProvider):
try:
weight, port, target = record['content'].split(' ', 2)
except ValueError:
- # see _data_for_NAPTR's continue
+ # their api/website will let you create invalid records, this
+ # essentially handles that by ignoring them for values
+ # purposes. That will cause updates to happen to delete them if
+ # they shouldn't exist or update them if they're wrong
+ self.log.warning(
+ '_data_for_SRV: unsupported %s record (%s)',
+ _type,
+ record['content']
+ )
continue
+
+ target = '{}.'.format(target) if target != "." else "."
+
values.append({
'port': port,
'priority': record['priority'],
- 'target': '{}.'.format(target),
+ 'target': target,
'weight': weight
})
return {
@@ -270,6 +282,10 @@ class DnsimpleProvider(BaseProvider):
for record in self.zone_records(zone):
_type = record['type']
if _type not in self.SUPPORTS:
+ self.log.warning(
+ 'populate: skipping unsupported %s record',
+ _type
+ )
continue
elif _type == 'TXT' and record['content'].startswith('ALIAS for'):
# ALIAS has a "ride along" TXT record with 'ALIAS for XXXX',
@@ -290,6 +306,27 @@ class DnsimpleProvider(BaseProvider):
len(zone.records) - before, exists)
return exists
+ def supports(self, record):
+ # DNSimple does not support empty/NULL SRV records
+ #
+ # Fails silently and leaves a corrupt record
+ #
+ # Skip the record and continue
+ if record._type == "SRV":
+ if 'value' in record.data:
+ targets = (record.data['value']['target'],)
+ else:
+ targets = [value['target'] for value in record.data['values']]
+
+ if "." in targets:
+ self.log.warning(
+ 'supports: unsupported %s record with target (%s)',
+ record._type, targets
+ )
+ return False
+
+ return super(DnsimpleProvider, self).supports(record)
+
def _params_for_multiple(self, record):
for value in record.values:
yield {
@@ -406,7 +443,7 @@ class DnsimpleProvider(BaseProvider):
domain_name = desired.name[:-1]
try:
- self._client.domain(domain_name)
+ self._client.zone(domain_name)
except DnsimpleClientNotFound:
self.log.debug('_apply: no matching zone, creating domain')
self._client.domain_create(domain_name)
diff --git a/octodns/provider/dnsmadeeasy.py b/octodns/provider/dnsmadeeasy.py
index 0bf05a0..ddd40d2 100644
--- a/octodns/provider/dnsmadeeasy.py
+++ b/octodns/provider/dnsmadeeasy.py
@@ -13,10 +13,11 @@ import hmac
import logging
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class DnsMadeEasyClientException(Exception):
+class DnsMadeEasyClientException(ProviderException):
pass
@@ -284,6 +285,30 @@ class DnsMadeEasyProvider(BaseProvider):
len(zone.records) - before, exists)
return exists
+ def supports(self, record):
+ # DNS Made Easy does not support empty/NULL SRV records
+ #
+ # Attempting to sync such a record would generate the following error
+ #
+ # octodns.provider.dnsmadeeasy.DnsMadeEasyClientBadRequest:
+ # - Record value may not be a standalone dot.
+ #
+ # Skip the record and continue
+ if record._type == "SRV":
+ if 'value' in record.data:
+ targets = (record.data['value']['target'],)
+ else:
+ targets = [value['target'] for value in record.data['values']]
+
+ if "." in targets:
+ self.log.warning(
+ 'supports: unsupported %s record with target (%s)',
+ record._type, targets
+ )
+ return False
+
+ return super(DnsMadeEasyProvider, self).supports(record)
+
def _params_for_multiple(self, record):
for value in record.values:
yield {
diff --git a/octodns/provider/dyn.py b/octodns/provider/dyn.py
index f7a15d2..2fbcadb 100644
--- a/octodns/provider/dyn.py
+++ b/octodns/provider/dyn.py
@@ -604,7 +604,7 @@ class DynProvider(BaseProvider):
return record
- def _is_traffic_director_dyanmic(self, td, rulesets):
+ def _is_traffic_director_dynamic(self, td, rulesets):
for ruleset in rulesets:
try:
pieces = ruleset.label.split(':')
@@ -632,7 +632,7 @@ class DynProvider(BaseProvider):
continue
# critical to call rulesets once, each call loads them :-(
rulesets = td.rulesets
- if self._is_traffic_director_dyanmic(td, rulesets):
+ if self._is_traffic_director_dynamic(td, rulesets):
record = \
self._populate_dynamic_traffic_director(zone, fqdn,
_type, td,
@@ -705,7 +705,7 @@ class DynProvider(BaseProvider):
label)
extra.append(Update(record, record))
continue
- if _monitor_doesnt_match(monitor, record.healthcheck_host,
+ if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port):
@@ -828,13 +828,13 @@ class DynProvider(BaseProvider):
self.traffic_director_monitors[label] = \
self.traffic_director_monitors[fqdn]
del self.traffic_director_monitors[fqdn]
- if _monitor_doesnt_match(monitor, record.healthcheck_host,
+ if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port):
self.log.info('_traffic_director_monitor: updating monitor '
'for %s', label)
- monitor.update(record.healthcheck_host,
+ monitor.update(record.healthcheck_host(),
record.healthcheck_path,
record.healthcheck_protocol,
record.healthcheck_port)
@@ -845,7 +845,7 @@ class DynProvider(BaseProvider):
monitor = DSFMonitor(label, protocol=record.healthcheck_protocol,
response_count=2, probe_interval=60,
retries=2, port=record.healthcheck_port,
- active='Y', host=record.healthcheck_host,
+ active='Y', host=record.healthcheck_host(),
timeout=self.MONITOR_TIMEOUT,
header=self.MONITOR_HEADER,
path=record.healthcheck_path)
diff --git a/octodns/provider/easydns.py b/octodns/provider/easydns.py
index 835fcb9..67c846a 100644
--- a/octodns/provider/easydns.py
+++ b/octodns/provider/easydns.py
@@ -12,10 +12,11 @@ import logging
import base64
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class EasyDNSClientException(Exception):
+class EasyDNSClientException(ProviderException):
pass
@@ -59,7 +60,7 @@ class EasyDNSClient(object):
self.base_path = self.SANDBOX if sandbox else self.LIVE
sess = Session()
sess.headers.update({'Authorization': 'Basic {}'
- .format(self.auth_key)})
+ .format(self.auth_key.decode('utf-8'))})
sess.headers.update({'accept': 'application/json'})
self._sess = sess
diff --git a/octodns/provider/edgedns.py b/octodns/provider/edgedns.py
index 26f0917..1dde770 100644
--- a/octodns/provider/edgedns.py
+++ b/octodns/provider/edgedns.py
@@ -12,10 +12,11 @@ from collections import defaultdict
from logging import getLogger
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class AkamaiClientNotFound(Exception):
+class AkamaiClientNotFound(ProviderException):
def __init__(self, resp):
message = "404: Resource not found"
diff --git a/octodns/provider/gandi.py b/octodns/provider/gandi.py
new file mode 100644
index 0000000..0938ac9
--- /dev/null
+++ b/octodns/provider/gandi.py
@@ -0,0 +1,379 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from collections import defaultdict
+from requests import Session
+import logging
+
+from ..record import Record
+from . import ProviderException
+from .base import BaseProvider
+
+
+class GandiClientException(ProviderException):
+ pass
+
+
+class GandiClientBadRequest(GandiClientException):
+
+ def __init__(self, r):
+ super(GandiClientBadRequest, self).__init__(r.text)
+
+
+class GandiClientUnauthorized(GandiClientException):
+
+ def __init__(self, r):
+ super(GandiClientUnauthorized, self).__init__(r.text)
+
+
+class GandiClientForbidden(GandiClientException):
+
+ def __init__(self, r):
+ super(GandiClientForbidden, self).__init__(r.text)
+
+
+class GandiClientNotFound(GandiClientException):
+
+ def __init__(self, r):
+ super(GandiClientNotFound, self).__init__(r.text)
+
+
+class GandiClientUnknownDomainName(GandiClientException):
+
+ def __init__(self, msg):
+ super(GandiClientUnknownDomainName, self).__init__(msg)
+
+
+class GandiClient(object):
+
+ def __init__(self, token):
+ session = Session()
+ session.headers.update({'Authorization': 'Apikey {}'.format(token)})
+ self._session = session
+ self.endpoint = 'https://api.gandi.net/v5'
+
+ def _request(self, method, path, params={}, data=None):
+ url = '{}{}'.format(self.endpoint, path)
+ r = self._session.request(method, url, params=params, json=data)
+ if r.status_code == 400:
+ raise GandiClientBadRequest(r)
+ if r.status_code == 401:
+ raise GandiClientUnauthorized(r)
+ elif r.status_code == 403:
+ raise GandiClientForbidden(r)
+ elif r.status_code == 404:
+ raise GandiClientNotFound(r)
+ r.raise_for_status()
+ return r
+
+ def zone(self, zone_name):
+ return self._request('GET', '/livedns/domains/{}'
+ .format(zone_name)).json()
+
+ def zone_create(self, zone_name):
+ return self._request('POST', '/livedns/domains', data={
+ 'fqdn': zone_name,
+ 'zone': {}
+ }).json()
+
+ def zone_records(self, zone_name):
+ records = self._request('GET', '/livedns/domains/{}/records'
+ .format(zone_name)).json()
+
+ for record in records:
+ if record['rrset_name'] == '@':
+ record['rrset_name'] = ''
+
+ # Change relative targets to absolute ones.
+ if record['rrset_type'] in ['ALIAS', 'CNAME', 'DNAME', 'MX',
+ 'NS', 'SRV']:
+ for i, value in enumerate(record['rrset_values']):
+ if not value.endswith('.'):
+ record['rrset_values'][i] = '{}.{}.'.format(
+ value, zone_name)
+
+ return records
+
+ def record_create(self, zone_name, data):
+ self._request('POST', '/livedns/domains/{}/records'.format(zone_name),
+ data=data)
+
+ def record_delete(self, zone_name, record_name, record_type):
+ self._request('DELETE', '/livedns/domains/{}/records/{}/{}'
+ .format(zone_name, record_name, record_type))
+
+
+class GandiProvider(BaseProvider):
+ '''
+ Gandi provider using API v5.
+
+ gandi:
+ class: octodns.provider.gandi.GandiProvider
+ # Your API key (required)
+ token: XXXXXXXXXXXX
+ '''
+
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = False
+ SUPPORTS = set((['A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'DNAME',
+ 'MX', 'NS', 'PTR', 'SPF', 'SRV', 'SSHFP', 'TXT']))
+
+ def __init__(self, id, token, *args, **kwargs):
+ self.log = logging.getLogger('GandiProvider[{}]'.format(id))
+ self.log.debug('__init__: id=%s, token=***', id)
+ super(GandiProvider, self).__init__(id, *args, **kwargs)
+ self._client = GandiClient(token)
+
+ self._zone_records = {}
+
+ def _data_for_multiple(self, _type, records):
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'values': [v.replace(';', '\\;') for v in
+ records[0]['rrset_values']] if _type == 'TXT' else
+ records[0]['rrset_values']
+ }
+
+ _data_for_A = _data_for_multiple
+ _data_for_AAAA = _data_for_multiple
+ _data_for_TXT = _data_for_multiple
+ _data_for_SPF = _data_for_multiple
+ _data_for_NS = _data_for_multiple
+
+ def _data_for_CAA(self, _type, records):
+ values = []
+ for record in records[0]['rrset_values']:
+ flags, tag, value = record.split(' ')
+ values.append({
+ 'flags': flags,
+ 'tag': tag,
+ # Remove quotes around value.
+ 'value': value[1:-1],
+ })
+
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_single(self, _type, records):
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'value': records[0]['rrset_values'][0]
+ }
+
+ _data_for_ALIAS = _data_for_single
+ _data_for_CNAME = _data_for_single
+ _data_for_DNAME = _data_for_single
+ _data_for_PTR = _data_for_single
+
+ def _data_for_MX(self, _type, records):
+ values = []
+ for record in records[0]['rrset_values']:
+ priority, server = record.split(' ')
+ values.append({
+ 'preference': priority,
+ 'exchange': server
+ })
+
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_SRV(self, _type, records):
+ values = []
+ for record in records[0]['rrset_values']:
+ priority, weight, port, target = record.split(' ', 3)
+ values.append({
+ 'priority': priority,
+ 'weight': weight,
+ 'port': port,
+ 'target': target
+ })
+
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_SSHFP(self, _type, records):
+ values = []
+ for record in records[0]['rrset_values']:
+ algorithm, fingerprint_type, fingerprint = record.split(' ', 2)
+ values.append({
+ 'algorithm': algorithm,
+ 'fingerprint': fingerprint,
+ 'fingerprint_type': fingerprint_type
+ })
+
+ return {
+ 'ttl': records[0]['rrset_ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
+ def zone_records(self, zone):
+ if zone.name not in self._zone_records:
+ try:
+ self._zone_records[zone.name] = \
+ self._client.zone_records(zone.name[:-1])
+ except GandiClientNotFound:
+ return []
+
+ return self._zone_records[zone.name]
+
+ def populate(self, zone, target=False, lenient=False):
+ self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
+ target, lenient)
+
+ values = defaultdict(lambda: defaultdict(list))
+ for record in self.zone_records(zone):
+ _type = record['rrset_type']
+ if _type not in self.SUPPORTS:
+ continue
+ values[record['rrset_name']][record['rrset_type']].append(record)
+
+ before = len(zone.records)
+ for name, types in values.items():
+ for _type, records in types.items():
+ data_for = getattr(self, '_data_for_{}'.format(_type))
+ record = Record.new(zone, name, data_for(_type, records),
+ source=self, lenient=lenient)
+ zone.add_record(record, lenient=lenient)
+
+ exists = zone.name in self._zone_records
+ self.log.info('populate: found %s records, exists=%s',
+ len(zone.records) - before, exists)
+ return exists
+
+ def _record_name(self, name):
+ return name if name else '@'
+
+ def _params_for_multiple(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': [v.replace('\\;', ';') for v in
+ record.values] if record._type == 'TXT'
+ else record.values
+ }
+
+ _params_for_A = _params_for_multiple
+ _params_for_AAAA = _params_for_multiple
+ _params_for_NS = _params_for_multiple
+ _params_for_TXT = _params_for_multiple
+ _params_for_SPF = _params_for_multiple
+
+ def _params_for_CAA(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': ['{} {} "{}"'.format(v.flags, v.tag, v.value)
+ for v in record.values]
+ }
+
+ def _params_for_single(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': [record.value]
+ }
+
+ _params_for_ALIAS = _params_for_single
+ _params_for_CNAME = _params_for_single
+ _params_for_DNAME = _params_for_single
+ _params_for_PTR = _params_for_single
+
+ def _params_for_MX(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': ['{} {}'.format(v.preference, v.exchange)
+ for v in record.values]
+ }
+
+ def _params_for_SRV(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': ['{} {} {} {}'.format(v.priority, v.weight, v.port,
+ v.target) for v in record.values]
+ }
+
+ def _params_for_SSHFP(self, record):
+ return {
+ 'rrset_name': self._record_name(record.name),
+ 'rrset_ttl': record.ttl,
+ 'rrset_type': record._type,
+ 'rrset_values': ['{} {} {}'.format(v.algorithm, v.fingerprint_type,
+ v.fingerprint) for v in record.values]
+ }
+
+ def _apply_create(self, change):
+ new = change.new
+ data = getattr(self, '_params_for_{}'.format(new._type))(new)
+ self._client.record_create(new.zone.name[:-1], data)
+
+ def _apply_update(self, change):
+ self._apply_delete(change)
+ self._apply_create(change)
+
+ def _apply_delete(self, change):
+ existing = change.existing
+ zone = existing.zone
+ self._client.record_delete(zone.name[:-1],
+ self._record_name(existing.name),
+ existing._type)
+
+ def _apply(self, plan):
+ desired = plan.desired
+ changes = plan.changes
+ zone = desired.name[:-1]
+ self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
+ len(changes))
+
+ try:
+ self._client.zone(zone)
+ except GandiClientNotFound:
+ self.log.info('_apply: no existing zone, trying to create it')
+ try:
+ self._client.zone_create(zone)
+ self.log.info('_apply: zone has been successfully created')
+ except GandiClientNotFound:
+ # We suppress existing exception before raising
+ # GandiClientUnknownDomainName.
+ e = GandiClientUnknownDomainName('This domain is not '
+ 'registered at Gandi. '
+ 'Please register or '
+ 'transfer it here '
+ 'to be able to manage its '
+ 'DNS zone.')
+ e.__cause__ = None
+ raise e
+
+ # Force records deletion to be done before creation in order to avoid
+ # "CNAME record must be the only record" error when an existing CNAME
+ # record is replaced by an A/AAAA record.
+ changes.reverse()
+
+ for change in changes:
+ class_name = change.__class__.__name__
+ getattr(self, '_apply_{}'.format(class_name.lower()))(change)
+
+ # Clear out the cache if any
+ self._zone_records.pop(desired.name, None)
diff --git a/octodns/provider/gcore.py b/octodns/provider/gcore.py
new file mode 100644
index 0000000..b76f90e
--- /dev/null
+++ b/octodns/provider/gcore.py
@@ -0,0 +1,624 @@
+#
+#
+#
+
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from collections import defaultdict
+from requests import Session
+import http
+import logging
+import urllib.parse
+
+from ..record import GeoCodes
+from ..record import Record
+from . import ProviderException
+from .base import BaseProvider
+
+
+class GCoreClientException(ProviderException):
+ def __init__(self, r):
+ super(GCoreClientException, self).__init__(r.text)
+
+
+class GCoreClientBadRequest(GCoreClientException):
+ def __init__(self, r):
+ super(GCoreClientBadRequest, self).__init__(r)
+
+
+class GCoreClientNotFound(GCoreClientException):
+ def __init__(self, r):
+ super(GCoreClientNotFound, self).__init__(r)
+
+
+class GCoreClient(object):
+
+ ROOT_ZONES = "zones"
+
+ def __init__(
+ self,
+ log,
+ api_url,
+ auth_url,
+ token=None,
+ token_type=None,
+ login=None,
+ password=None,
+ ):
+ self.log = log
+ self._session = Session()
+ self._api_url = api_url
+ if token is not None and token_type is not None:
+ self._session.headers.update(
+ {"Authorization": "{} {}".format(token_type, token)}
+ )
+ elif login is not None and password is not None:
+ token = self._auth(auth_url, login, password)
+ self._session.headers.update(
+ {"Authorization": "Bearer {}".format(token)}
+ )
+ else:
+ raise ValueError("either token or login & password must be set")
+
+ def _auth(self, url, login, password):
+        # well, we can't use _request, since the API returns 400 if the
+        # credentials are invalid, which would be logged — we don't want that
+ r = self._session.request(
+ "POST",
+ self._build_url(url, "auth", "jwt", "login"),
+ json={"username": login, "password": password},
+ )
+ r.raise_for_status()
+ return r.json()["access"]
+
+ def _request(self, method, url, params=None, data=None):
+ r = self._session.request(
+ method, url, params=params, json=data, timeout=30.0
+ )
+ if r.status_code == http.HTTPStatus.BAD_REQUEST:
+ self.log.error(
+ "bad request %r has been sent to %r: %s", data, url, r.text
+ )
+ raise GCoreClientBadRequest(r)
+ elif r.status_code == http.HTTPStatus.NOT_FOUND:
+ self.log.error("resource %r not found: %s", url, r.text)
+ raise GCoreClientNotFound(r)
+ elif r.status_code == http.HTTPStatus.INTERNAL_SERVER_ERROR:
+ self.log.error("server error no %r to %r: %s", data, url, r.text)
+ raise GCoreClientException(r)
+ r.raise_for_status()
+ return r
+
+ def zone(self, zone_name):
+ return self._request(
+ "GET", self._build_url(self._api_url, self.ROOT_ZONES, zone_name)
+ ).json()
+
+ def zone_create(self, zone_name):
+ return self._request(
+ "POST",
+ self._build_url(self._api_url, self.ROOT_ZONES),
+ data={"name": zone_name},
+ ).json()
+
+ def zone_records(self, zone_name):
+ rrsets = self._request(
+ "GET",
+ "{}".format(
+ self._build_url(
+ self._api_url, self.ROOT_ZONES, zone_name, "rrsets"
+ )
+ ),
+ params={"all": "true"},
+ ).json()
+ records = rrsets["rrsets"]
+ return records
+
+ def record_create(self, zone_name, rrset_name, type_, data):
+ self._request(
+ "POST", self._rrset_url(zone_name, rrset_name, type_), data=data
+ )
+
+ def record_update(self, zone_name, rrset_name, type_, data):
+ self._request(
+ "PUT", self._rrset_url(zone_name, rrset_name, type_), data=data
+ )
+
+ def record_delete(self, zone_name, rrset_name, type_):
+ self._request("DELETE", self._rrset_url(zone_name, rrset_name, type_))
+
+ def _rrset_url(self, zone_name, rrset_name, type_):
+ return self._build_url(
+ self._api_url, self.ROOT_ZONES, zone_name, rrset_name, type_
+ )
+
+ @staticmethod
+ def _build_url(base, *items):
+ for i in items:
+ base = base.strip("/") + "/"
+ base = urllib.parse.urljoin(base, i)
+ return base
+
+
+class GCoreProvider(BaseProvider):
+ """
+ GCore provider using API v2.
+
+ gcore:
+ class: octodns.provider.gcore.GCoreProvider
+ # Your API key
+ token: XXXXXXXXXXXX
+ # token_type: APIKey
+ # or login + password
+ login: XXXXXXXXXXXX
+ password: XXXXXXXXXXXX
+ # auth_url: https://api.gcdn.co
+ # url: https://dnsapi.gcorelabs.com/v2
+ # records_per_response: 1
+ """
+
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = True
+ SUPPORTS = set(("A", "AAAA", "NS", "MX", "TXT", "SRV", "CNAME", "PTR"))
+
+ def __init__(self, id, *args, **kwargs):
+ token = kwargs.pop("token", None)
+ token_type = kwargs.pop("token_type", "APIKey")
+ login = kwargs.pop("login", None)
+ password = kwargs.pop("password", None)
+ api_url = kwargs.pop("url", "https://dnsapi.gcorelabs.com/v2")
+ auth_url = kwargs.pop("auth_url", "https://api.gcdn.co")
+ self.records_per_response = kwargs.pop("records_per_response", 1)
+ self.log = logging.getLogger("GCoreProvider[{}]".format(id))
+ self.log.debug("__init__: id=%s", id)
+ super(GCoreProvider, self).__init__(id, *args, **kwargs)
+ self._client = GCoreClient(
+ self.log,
+ api_url,
+ auth_url,
+ token=token,
+ token_type=token_type,
+ login=login,
+ password=password,
+ )
+
+ def _add_dot_if_need(self, value):
+ return "{}.".format(value) if not value.endswith(".") else value
+
+ def _build_pools(self, record, default_pool_name, value_transform_fn):
+ defaults = []
+ geo_sets, pool_idx = dict(), 0
+ pools = defaultdict(lambda: {"values": []})
+ for rr in record["resource_records"]:
+ meta = rr.get("meta", {}) or {}
+ value = {"value": value_transform_fn(rr["content"][0])}
+ countries = meta.get("countries", []) or []
+ continents = meta.get("continents", []) or []
+
+ if meta.get("default", False):
+ pools[default_pool_name]["values"].append(value)
+ defaults.append(value["value"])
+ continue
+            # default is false or missing and there are no countries or continents
+ elif len(continents) == 0 and len(countries) == 0:
+ defaults.append(value["value"])
+ continue
+
+ # RR with the same set of countries and continents are
+ # combined in single pool
+ geo_set = frozenset(
+ [GeoCodes.country_to_code(cc.upper()) for cc in countries]
+ ) | frozenset(cc.upper() for cc in continents)
+ if geo_set not in geo_sets:
+ geo_sets[geo_set] = "pool-{}".format(pool_idx)
+ pool_idx += 1
+
+ pools[geo_sets[geo_set]]["values"].append(value)
+
+ return pools, geo_sets, defaults
+
+ def _build_rules(self, pools, geo_sets):
+ rules = []
+ for name, _ in pools.items():
+ rule = {"pool": name}
+ geo_set = next(
+ (
+ geo_set
+ for geo_set, pool_name in geo_sets.items()
+ if pool_name == name
+ ),
+ {},
+ )
+ if len(geo_set) > 0:
+ rule["geos"] = list(geo_set)
+ rules.append(rule)
+
+ return sorted(rules, key=lambda x: x["pool"])
+
+ def _data_for_dynamic(self, record, value_transform_fn=lambda x: x):
+ default_pool = "other"
+ pools, geo_sets, defaults = self._build_pools(
+ record, default_pool, value_transform_fn
+ )
+ if len(pools) == 0:
+ raise RuntimeError(
+ "filter is enabled, but no pools where built for {}".format(
+ record
+ )
+ )
+
+ # defaults can't be empty, so use first pool values
+ if len(defaults) == 0:
+ defaults = [
+ value_transform_fn(v["value"])
+ for v in next(iter(pools.values()))["values"]
+ ]
+
+ # if at least one default RR was found then setup fallback for
+ # other pools to default
+ if default_pool in pools:
+ for pool_name, pool in pools.items():
+ if pool_name == default_pool:
+ continue
+ pool["fallback"] = default_pool
+
+ rules = self._build_rules(pools, geo_sets)
+ return pools, rules, defaults
+
+ def _data_for_single(self, _type, record):
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "value": self._add_dot_if_need(
+ record["resource_records"][0]["content"][0]
+ ),
+ }
+
+ _data_for_PTR = _data_for_single
+
+ def _data_for_CNAME(self, _type, record):
+ if record.get("filters") is None:
+ return self._data_for_single(_type, record)
+
+ pools, rules, defaults = self._data_for_dynamic(
+ record, self._add_dot_if_need
+ )
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "dynamic": {"pools": pools, "rules": rules},
+ "value": self._add_dot_if_need(defaults[0]),
+ }
+
+ def _data_for_multiple(self, _type, record):
+ extra = dict()
+ if record.get("filters") is not None:
+ pools, rules, defaults = self._data_for_dynamic(record)
+ extra = {
+ "dynamic": {"pools": pools, "rules": rules},
+ "values": defaults,
+ }
+ else:
+ extra = {
+ "values": [
+ rr_value
+ for resource_record in record["resource_records"]
+ for rr_value in resource_record["content"]
+ ]
+ }
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ **extra,
+ }
+
+ _data_for_A = _data_for_multiple
+ _data_for_AAAA = _data_for_multiple
+
+ def _data_for_TXT(self, _type, record):
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "values": [
+ rr_value.replace(";", "\\;")
+ for resource_record in record["resource_records"]
+ for rr_value in resource_record["content"]
+ ],
+ }
+
+ def _data_for_MX(self, _type, record):
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "values": [
+ dict(
+ preference=preference,
+ exchange=self._add_dot_if_need(exchange),
+ )
+ for preference, exchange in map(
+ lambda x: x["content"], record["resource_records"]
+ )
+ ],
+ }
+
+ def _data_for_NS(self, _type, record):
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "values": [
+ self._add_dot_if_need(rr_value)
+ for resource_record in record["resource_records"]
+ for rr_value in resource_record["content"]
+ ],
+ }
+
+ def _data_for_SRV(self, _type, record):
+ return {
+ "ttl": record["ttl"],
+ "type": _type,
+ "values": [
+ dict(
+ priority=priority,
+ weight=weight,
+ port=port,
+ target=self._add_dot_if_need(target),
+ )
+ for priority, weight, port, target in map(
+ lambda x: x["content"], record["resource_records"]
+ )
+ ],
+ }
+
+ def zone_records(self, zone):
+ try:
+ return self._client.zone_records(zone.name[:-1]), True
+ except GCoreClientNotFound:
+ return [], False
+
+ def populate(self, zone, target=False, lenient=False):
+ self.log.debug(
+ "populate: name=%s, target=%s, lenient=%s",
+ zone.name,
+ target,
+ lenient,
+ )
+
+ values = defaultdict(defaultdict)
+ records, exists = self.zone_records(zone)
+ for record in records:
+ _type = record["type"].upper()
+ if _type not in self.SUPPORTS:
+ continue
+ if self._should_ignore(record):
+ continue
+ rr_name = zone.hostname_from_fqdn(record["name"])
+ values[rr_name][_type] = record
+
+ before = len(zone.records)
+ for name, types in values.items():
+ for _type, record in types.items():
+ data_for = getattr(self, "_data_for_{}".format(_type))
+ record = Record.new(
+ zone,
+ name,
+ data_for(_type, record),
+ source=self,
+ lenient=lenient,
+ )
+ zone.add_record(record, lenient=lenient)
+
+ self.log.info(
+ "populate: found %s records, exists=%s",
+ len(zone.records) - before,
+ exists,
+ )
+ return exists
+
+ def _should_ignore(self, record):
+ name = record.get("name", "name-not-defined")
+ if record.get("filters") is None:
+ return False
+ want_filters = 3
+ filters = record.get("filters", [])
+ if len(filters) != want_filters:
+ self.log.info(
+ "ignore %s has filters and their count is not %d",
+ name,
+ want_filters,
+ )
+ return True
+ types = [v.get("type") for v in filters]
+ for i, want_type in enumerate(["geodns", "default", "first_n"]):
+ if types[i] != want_type:
+ self.log.info(
+ "ignore %s, filters.%d.type is %s, want %s",
+ name,
+ i,
+ types[i],
+ want_type,
+ )
+ return True
+ limits = [filters[i].get("limit", 1) for i in [1, 2]]
+ if limits[0] != limits[1]:
+ self.log.info(
+ "ignore %s, filters.1.limit (%d) != filters.2.limit (%d)",
+ name,
+ limits[0],
+ limits[1],
+ )
+ return True
+ return False
+
+ def _params_for_dymanic(self, record):
+ records = []
+ default_pool_found = False
+ default_values = set(
+ record.values if hasattr(record, "values") else [record.value]
+ )
+ for rule in record.dynamic.rules:
+ meta = dict()
+ # build meta tags if geos information present
+ if len(rule.data.get("geos", [])) > 0:
+ for geo_code in rule.data["geos"]:
+ geo = GeoCodes.parse(geo_code)
+
+ country = geo["country_code"]
+ continent = geo["continent_code"]
+ if country is not None:
+ meta.setdefault("countries", []).append(country)
+ else:
+ meta.setdefault("continents", []).append(continent)
+ else:
+ meta["default"] = True
+
+ pool_values = set()
+ pool_name = rule.data["pool"]
+ for value in record.dynamic.pools[pool_name].data["values"]:
+ v = value["value"]
+ records.append({"content": [v], "meta": meta})
+ pool_values.add(v)
+
+ default_pool_found |= default_values == pool_values
+
+        # if the default values don't match any pool's values, then just add
+        # these values without any meta
+ if not default_pool_found:
+ for value in default_values:
+ records.append({"content": [value]})
+
+ return records
+
+ def _params_for_single(self, record):
+ return {
+ "ttl": record.ttl,
+ "resource_records": [{"content": [record.value]}],
+ }
+
+ _params_for_PTR = _params_for_single
+
+ def _params_for_CNAME(self, record):
+ if not record.dynamic:
+ return self._params_for_single(record)
+
+ return {
+ "ttl": record.ttl,
+ "resource_records": self._params_for_dymanic(record),
+ "filters": [
+ {"type": "geodns"},
+ {
+ "type": "default",
+ "limit": self.records_per_response,
+ "strict": False,
+ },
+ {"type": "first_n", "limit": self.records_per_response},
+ ],
+ }
+
+ def _params_for_multiple(self, record):
+ extra = dict()
+ if record.dynamic:
+ extra["resource_records"] = self._params_for_dymanic(record)
+ extra["filters"] = [
+ {"type": "geodns"},
+ {
+ "type": "default",
+ "limit": self.records_per_response,
+ "strict": False,
+ },
+ {"type": "first_n", "limit": self.records_per_response},
+ ]
+ else:
+ extra["resource_records"] = [
+ {"content": [value]} for value in record.values
+ ]
+ return {
+ "ttl": record.ttl,
+ **extra,
+ }
+
+ _params_for_A = _params_for_multiple
+ _params_for_AAAA = _params_for_multiple
+
+ def _params_for_NS(self, record):
+ return {
+ "ttl": record.ttl,
+ "resource_records": [
+ {"content": [value]} for value in record.values
+ ],
+ }
+
+ def _params_for_TXT(self, record):
+ return {
+ "ttl": record.ttl,
+ "resource_records": [
+ {"content": [value.replace("\\;", ";")]}
+ for value in record.values
+ ],
+ }
+
+ def _params_for_MX(self, record):
+ return {
+ "ttl": record.ttl,
+ "resource_records": [
+ {"content": [rec.preference, rec.exchange]}
+ for rec in record.values
+ ],
+ }
+
+ def _params_for_SRV(self, record):
+ return {
+ "ttl": record.ttl,
+ "resource_records": [
+ {"content": [rec.priority, rec.weight, rec.port, rec.target]}
+ for rec in record.values
+ ],
+ }
+
+ def _apply_create(self, change):
+ self.log.info("creating: %s", change)
+ new = change.new
+ data = getattr(self, "_params_for_{}".format(new._type))(new)
+ self._client.record_create(
+ new.zone.name[:-1], new.fqdn, new._type, data
+ )
+
+ def _apply_update(self, change):
+ self.log.info("updating: %s", change)
+ new = change.new
+ data = getattr(self, "_params_for_{}".format(new._type))(new)
+ self._client.record_update(
+ new.zone.name[:-1], new.fqdn, new._type, data
+ )
+
+ def _apply_delete(self, change):
+ self.log.info("deleting: %s", change)
+ existing = change.existing
+ self._client.record_delete(
+ existing.zone.name[:-1], existing.fqdn, existing._type
+ )
+
+ def _apply(self, plan):
+ desired = plan.desired
+ changes = plan.changes
+ zone = desired.name[:-1]
+ self.log.debug(
+ "_apply: zone=%s, len(changes)=%d", desired.name, len(changes)
+ )
+
+ try:
+ self._client.zone(zone)
+ except GCoreClientNotFound:
+ self.log.info("_apply: no existing zone, trying to create it")
+ self._client.zone_create(zone)
+ self.log.info("_apply: zone has been successfully created")
+
+ changes.reverse()
+
+ for change in changes:
+ class_name = change.__class__.__name__
+ getattr(self, "_apply_{}".format(class_name.lower()))(change)
diff --git a/octodns/provider/hetzner.py b/octodns/provider/hetzner.py
new file mode 100644
index 0000000..b1b2884
--- /dev/null
+++ b/octodns/provider/hetzner.py
@@ -0,0 +1,340 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from collections import defaultdict
+from requests import Session
+import logging
+
+from ..record import Record
+from . import ProviderException
+from .base import BaseProvider
+
+
+class HetznerClientException(ProviderException):
+ pass
+
+
+class HetznerClientNotFound(HetznerClientException):
+
+ def __init__(self):
+ super(HetznerClientNotFound, self).__init__('Not Found')
+
+
+class HetznerClientUnauthorized(HetznerClientException):
+
+ def __init__(self):
+ super(HetznerClientUnauthorized, self).__init__('Unauthorized')
+
+
+class HetznerClient(object):
+ BASE_URL = 'https://dns.hetzner.com/api/v1'
+
+ def __init__(self, token):
+ session = Session()
+ session.headers.update({'Auth-API-Token': token})
+ self._session = session
+
+ def _do(self, method, path, params=None, data=None):
+ url = '{}{}'.format(self.BASE_URL, path)
+ response = self._session.request(method, url, params=params, json=data)
+ if response.status_code == 401:
+ raise HetznerClientUnauthorized()
+ if response.status_code == 404:
+ raise HetznerClientNotFound()
+ response.raise_for_status()
+ return response
+
+ def _do_json(self, method, path, params=None, data=None):
+ return self._do(method, path, params, data).json()
+
+ def zone_get(self, name):
+ params = {'name': name}
+ return self._do_json('GET', '/zones', params)['zones'][0]
+
+ def zone_create(self, name, ttl=None):
+ data = {'name': name, 'ttl': ttl}
+ return self._do_json('POST', '/zones', data=data)['zone']
+
+ def zone_records_get(self, zone_id):
+ params = {'zone_id': zone_id}
+ records = self._do_json('GET', '/records', params=params)['records']
+ for record in records:
+ if record['name'] == '@':
+ record['name'] = ''
+ return records
+
+ def zone_record_create(self, zone_id, name, _type, value, ttl=None):
+ data = {'name': name or '@', 'ttl': ttl, 'type': _type, 'value': value,
+ 'zone_id': zone_id}
+ self._do('POST', '/records', data=data)
+
+ def zone_record_delete(self, zone_id, record_id):
+ self._do('DELETE', '/records/{}'.format(record_id))
+
+
+class HetznerProvider(BaseProvider):
+ '''
+ Hetzner DNS provider using API v1
+
+ hetzner:
+ class: octodns.provider.hetzner.HetznerProvider
+ # Your Hetzner API token (required)
+ token: foo
+ '''
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = False
+ SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'))
+
+ def __init__(self, id, token, *args, **kwargs):
+ self.log = logging.getLogger('HetznerProvider[{}]'.format(id))
+ self.log.debug('__init__: id=%s, token=***', id)
+ super(HetznerProvider, self).__init__(id, *args, **kwargs)
+ self._client = HetznerClient(token)
+
+ self._zone_records = {}
+ self._zone_metadata = {}
+ self._zone_name_to_id = {}
+
+ def _append_dot(self, value):
+ if value == '@' or value[-1] == '.':
+ return value
+ return '{}.'.format(value)
+
+ def zone_metadata(self, zone_id=None, zone_name=None):
+ if zone_name is not None:
+ if zone_name in self._zone_name_to_id:
+ zone_id = self._zone_name_to_id[zone_name]
+ else:
+ zone = self._client.zone_get(name=zone_name[:-1])
+ zone_id = zone['id']
+ self._zone_name_to_id[zone_name] = zone_id
+ self._zone_metadata[zone_id] = zone
+
+ return self._zone_metadata[zone_id]
+
+ def _record_ttl(self, record):
+ default_ttl = self.zone_metadata(zone_id=record['zone_id'])['ttl']
+ return record['ttl'] if 'ttl' in record else default_ttl
+
+ def _data_for_multiple(self, _type, records):
+ values = [record['value'].replace(';', '\\;') for record in records]
+ return {
+ 'ttl': self._record_ttl(records[0]),
+ 'type': _type,
+ 'values': values
+ }
+
+ _data_for_A = _data_for_multiple
+ _data_for_AAAA = _data_for_multiple
+
+ def _data_for_CAA(self, _type, records):
+ values = []
+ for record in records:
+ value_without_spaces = record['value'].replace(' ', '')
+ flags = value_without_spaces[0]
+ tag = value_without_spaces[1:].split('"')[0]
+ value = record['value'].split('"')[1]
+ values.append({
+ 'flags': int(flags),
+ 'tag': tag,
+ 'value': value,
+ })
+ return {
+ 'ttl': self._record_ttl(records[0]),
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_CNAME(self, _type, records):
+ record = records[0]
+ return {
+ 'ttl': self._record_ttl(record),
+ 'type': _type,
+ 'value': self._append_dot(record['value'])
+ }
+
+ def _data_for_MX(self, _type, records):
+ values = []
+ for record in records:
+ value_stripped_split = record['value'].strip().split(' ')
+ preference = value_stripped_split[0]
+ exchange = value_stripped_split[-1]
+ values.append({
+ 'preference': int(preference),
+ 'exchange': self._append_dot(exchange)
+ })
+ return {
+ 'ttl': self._record_ttl(records[0]),
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_NS(self, _type, records):
+ values = []
+ for record in records:
+ values.append(self._append_dot(record['value']))
+ return {
+ 'ttl': self._record_ttl(records[0]),
+ 'type': _type,
+ 'values': values,
+ }
+
+ def _data_for_SRV(self, _type, records):
+ values = []
+ for record in records:
+ value_stripped = record['value'].strip()
+ priority = value_stripped.split(' ')[0]
+ weight = value_stripped[len(priority):].strip().split(' ')[0]
+ target = value_stripped.split(' ')[-1]
+ port = value_stripped[:-len(target)].strip().split(' ')[-1]
+ values.append({
+ 'port': int(port),
+ 'priority': int(priority),
+ 'target': self._append_dot(target),
+ 'weight': int(weight)
+ })
+ return {
+ 'ttl': self._record_ttl(records[0]),
+ 'type': _type,
+ 'values': values
+ }
+
+ _data_for_TXT = _data_for_multiple
+
+ def zone_records(self, zone):
+ if zone.name not in self._zone_records:
+ try:
+ zone_id = self.zone_metadata(zone_name=zone.name)['id']
+ self._zone_records[zone.name] = \
+ self._client.zone_records_get(zone_id)
+ except HetznerClientNotFound:
+ return []
+
+ return self._zone_records[zone.name]
+
+ def populate(self, zone, target=False, lenient=False):
+ self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
+ target, lenient)
+
+ values = defaultdict(lambda: defaultdict(list))
+ for record in self.zone_records(zone):
+ _type = record['type']
+ if _type not in self.SUPPORTS:
+ self.log.warning('populate: skipping unsupported %s record',
+ _type)
+ continue
+ values[record['name']][record['type']].append(record)
+
+ before = len(zone.records)
+ for name, types in values.items():
+ for _type, records in types.items():
+ data_for = getattr(self, '_data_for_{}'.format(_type))
+ record = Record.new(zone, name, data_for(_type, records),
+ source=self, lenient=lenient)
+ zone.add_record(record, lenient=lenient)
+
+ exists = zone.name in self._zone_records
+ self.log.info('populate: found %s records, exists=%s',
+ len(zone.records) - before, exists)
+ return exists
+
+ def _params_for_multiple(self, record):
+ for value in record.values:
+ yield {
+ 'value': value.replace('\\;', ';'),
+ 'name': record.name,
+ 'ttl': record.ttl,
+ 'type': record._type
+ }
+
+ _params_for_A = _params_for_multiple
+ _params_for_AAAA = _params_for_multiple
+
+ def _params_for_CAA(self, record):
+ for value in record.values:
+ data = '{} {} "{}"'.format(value.flags, value.tag, value.value)
+ yield {
+ 'value': data,
+ 'name': record.name,
+ 'ttl': record.ttl,
+ 'type': record._type
+ }
+
+ def _params_for_single(self, record):
+ yield {
+ 'value': record.value,
+ 'name': record.name,
+ 'ttl': record.ttl,
+ 'type': record._type
+ }
+
+ _params_for_CNAME = _params_for_single
+
+ def _params_for_MX(self, record):
+ for value in record.values:
+ data = '{} {}'.format(value.preference, value.exchange)
+ yield {
+ 'value': data,
+ 'name': record.name,
+ 'ttl': record.ttl,
+ 'type': record._type
+ }
+
+ _params_for_NS = _params_for_multiple
+
+ def _params_for_SRV(self, record):
+ for value in record.values:
+ data = '{} {} {} {}'.format(value.priority, value.weight,
+ value.port, value.target)
+ yield {
+ 'value': data,
+ 'name': record.name,
+ 'ttl': record.ttl,
+ 'type': record._type
+ }
+
+ _params_for_TXT = _params_for_multiple
+
+ def _apply_Create(self, zone_id, change):
+ new = change.new
+ params_for = getattr(self, '_params_for_{}'.format(new._type))
+ for params in params_for(new):
+ self._client.zone_record_create(zone_id, params['name'],
+ params['type'], params['value'],
+ params['ttl'])
+
+ def _apply_Update(self, zone_id, change):
+ # It's way simpler to delete-then-recreate than to update
+ self._apply_Delete(zone_id, change)
+ self._apply_Create(zone_id, change)
+
+ def _apply_Delete(self, zone_id, change):
+ existing = change.existing
+ zone = existing.zone
+ for record in self.zone_records(zone):
+ if existing.name == record['name'] and \
+ existing._type == record['type']:
+ self._client.zone_record_delete(zone_id, record['id'])
+
+ def _apply(self, plan):
+ desired = plan.desired
+ changes = plan.changes
+ self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
+ len(changes))
+
+ try:
+ zone_id = self.zone_metadata(zone_name=desired.name)['id']
+ except HetznerClientNotFound:
+ self.log.debug('_apply: no matching zone, creating domain')
+ zone_id = self._client.zone_create(desired.name[:-1])['id']
+
+ for change in changes:
+ class_name = change.__class__.__name__
+ getattr(self, '_apply_{}'.format(class_name))(zone_id, change)
+
+ # Clear out the cache if any
+ self._zone_records.pop(desired.name, None)
diff --git a/octodns/provider/mythicbeasts.py b/octodns/provider/mythicbeasts.py
index b255a74..683209d 100644
--- a/octodns/provider/mythicbeasts.py
+++ b/octodns/provider/mythicbeasts.py
@@ -11,6 +11,7 @@ from requests import Session
from logging import getLogger
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
from collections import defaultdict
@@ -34,7 +35,7 @@ def remove_trailing_dot(value):
return value[:-1]
-class MythicBeastsUnauthorizedException(Exception):
+class MythicBeastsUnauthorizedException(ProviderException):
def __init__(self, zone, *args):
self.zone = zone
self.message = 'Mythic Beasts unauthorized for zone: {}'.format(
@@ -45,7 +46,7 @@ class MythicBeastsUnauthorizedException(Exception):
self.message, self.zone, *args)
-class MythicBeastsRecordException(Exception):
+class MythicBeastsRecordException(ProviderException):
def __init__(self, zone, command, *args):
self.zone = zone
self.command = command
@@ -70,13 +71,13 @@ class MythicBeastsProvider(BaseProvider):
...
mythicbeasts:
class: octodns.provider.mythicbeasts.MythicBeastsProvider
- passwords:
- my.domain.: 'password'
+ passwords:
+ my.domain.: 'DNS API v1 password'
zones:
my.domain.:
targets:
- - mythic
+ - mythicbeasts
'''
RE_MX = re.compile(r'^(?P[0-9]+)\s+(?P\S+)$',
diff --git a/octodns/provider/ns1.py b/octodns/provider/ns1.py
index 6cea185..8c09f39 100644
--- a/octodns/provider/ns1.py
+++ b/octodns/provider/ns1.py
@@ -17,10 +17,15 @@ from uuid import uuid4
from six import text_type
from ..record import Record, Update
+from . import ProviderException
from .base import BaseProvider
-class Ns1Exception(Exception):
+def _ensure_endswith_dot(string):
+ return string if string.endswith('.') else '{}.'.format(string)
+
+
+class Ns1Exception(ProviderException):
pass
@@ -76,9 +81,48 @@ class Ns1Client(object):
self._datasource = client.datasource()
self._datafeed = client.datafeed()
+ self.reset_caches()
+
+ def reset_caches(self):
self._datasource_id = None
self._feeds_for_monitors = None
self._monitors_cache = None
+ self._notifylists_cache = None
+ self._zones_cache = {}
+ self._records_cache = {}
+
+ def update_record_cache(func):
+ def call(self, zone, domain, _type, **params):
+ if zone in self._zones_cache:
+ # remove record's zone from cache
+ del self._zones_cache[zone]
+
+ cached = self._records_cache.setdefault(zone, {}) \
+ .setdefault(domain, {})
+
+ if _type in cached:
+ # remove record from cache
+ del cached[_type]
+
+ # write record to cache if its not a delete
+ new_record = func(self, zone, domain, _type, **params)
+ if new_record:
+ cached[_type] = new_record
+
+ return new_record
+
+ return call
+
+ def read_or_set_record_cache(func):
+ def call(self, zone, domain, _type):
+ cached = self._records_cache.setdefault(zone, {}) \
+ .setdefault(domain, {})
+ if _type not in cached:
+ cached[_type] = func(self, zone, domain, _type)
+
+ return cached[_type]
+
+ return call
@property
def datasource_id(self):
@@ -121,6 +165,14 @@ class Ns1Client(object):
{m['id']: m for m in self.monitors_list()}
return self._monitors_cache
+ @property
+ def notifylists(self):
+ if self._notifylists_cache is None:
+ self.log.debug('notifylists: fetching & building')
+ self._notifylists_cache = \
+ {l['name']: l for l in self.notifylists_list()}
+ return self._notifylists_cache
+
def datafeed_create(self, sourceid, name, config):
ret = self._try(self._datafeed.create, sourceid, name, config)
self.feeds_for_monitors[config['jobid']] = ret['id']
@@ -163,31 +215,45 @@ class Ns1Client(object):
return ret
def notifylists_delete(self, nlid):
+ for name, nl in self.notifylists.items():
+ if nl['id'] == nlid:
+ del self._notifylists_cache[name]
+ break
return self._try(self._notifylists.delete, nlid)
def notifylists_create(self, **body):
- return self._try(self._notifylists.create, body)
+ nl = self._try(self._notifylists.create, body)
+ # cache it
+ self.notifylists[nl['name']] = nl
+ return nl
def notifylists_list(self):
return self._try(self._notifylists.list)
+ @update_record_cache
def records_create(self, zone, domain, _type, **params):
return self._try(self._records.create, zone, domain, _type, **params)
+ @update_record_cache
def records_delete(self, zone, domain, _type):
return self._try(self._records.delete, zone, domain, _type)
+ @read_or_set_record_cache
def records_retrieve(self, zone, domain, _type):
return self._try(self._records.retrieve, zone, domain, _type)
+ @update_record_cache
def records_update(self, zone, domain, _type, **params):
return self._try(self._records.update, zone, domain, _type, **params)
def zones_create(self, name):
- return self._try(self._zones.create, name)
+ self._zones_cache[name] = self._try(self._zones.create, name)
+ return self._zones_cache[name]
def zones_retrieve(self, name):
- return self._try(self._zones.retrieve, name)
+ if name not in self._zones_cache:
+ self._zones_cache[name] = self._try(self._zones.retrieve, name)
+ return self._zones_cache[name]
def _try(self, method, *args, **kwargs):
tries = self.retry_count
@@ -216,6 +282,13 @@ class Ns1Provider(BaseProvider):
# Only required if using dynamic records
monitor_regions:
- lga
+        # Optional. Default: false. true is recommended, but not the default
+ # for backwards compatibility reasons. If true, all NS1 monitors will
+ # use a shared notify list rather than one per record & value
+ # combination. See CHANGELOG,
+ # https://github.com/octodns/octodns/blob/master/CHANGELOG.md, for more
+ # information before enabling this behavior.
+ shared_notifylist: false
# Optional. Default: None. If set, back off in advance to avoid 429s
# from rate-limiting. Generally this should be set to the number
# of processes or workers hitting the API, e.g. the value of
@@ -233,10 +306,12 @@ class Ns1Provider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
+ SUPPORTS_MUTLIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR',
- 'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
+ 'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
+ SHARED_NOTIFYLIST_NAME = 'octoDNS NS1 Notify List'
def _update_filter(self, filter, with_disabled):
if with_disabled:
@@ -341,6 +416,9 @@ class Ns1Provider(BaseProvider):
'ASIAPAC': 'AS',
'EUROPE': 'EU',
'SOUTH-AMERICA': 'SA',
+ # continent NA has been handled as part of Geofence Country filter
+ # starting from v0.9.13. These below US-* just need to continue to
+        # exist here so it doesn't break the upgrade path
'US-CENTRAL': 'NA',
'US-EAST': 'NA',
'US-WEST': 'NA',
@@ -350,8 +428,6 @@ class Ns1Provider(BaseProvider):
'AS': ('ASIAPAC',),
'EU': ('EUROPE',),
'SA': ('SOUTH-AMERICA',),
- # TODO: what about CA, MX, and all the other NA countries?
- 'NA': ('US-CENTRAL', 'US-EAST', 'US-WEST'),
}
# Necessary for handling unsupported continents in _CONTINENT_TO_REGIONS
@@ -359,10 +435,16 @@ class Ns1Provider(BaseProvider):
'OC': {'FJ', 'NC', 'PG', 'SB', 'VU', 'AU', 'NF', 'NZ', 'FM', 'GU',
'KI', 'MH', 'MP', 'NR', 'PW', 'AS', 'CK', 'NU', 'PF', 'PN',
'TK', 'TO', 'TV', 'WF', 'WS'},
+ 'NA': {'DO', 'DM', 'BB', 'BL', 'BM', 'HT', 'KN', 'JM', 'VC', 'HN',
+ 'BS', 'BZ', 'PR', 'NI', 'LC', 'TT', 'VG', 'PA', 'TC', 'PM',
+ 'GT', 'AG', 'GP', 'AI', 'VI', 'CA', 'GD', 'AW', 'CR', 'GL',
+ 'CU', 'MF', 'SV', 'US', 'MQ', 'MS', 'KY', 'MX', 'CW', 'BQ',
+ 'SX', 'UM'}
}
def __init__(self, id, api_key, retry_count=4, monitor_regions=None,
- parallelism=None, client_config=None, *args, **kwargs):
+ parallelism=None, client_config=None, shared_notifylist=False,
+ *args, **kwargs):
self.log = getLogger('Ns1Provider[{}]'.format(id))
self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, '
'monitor_regions=%s, parallelism=%s, client_config=%s',
@@ -370,6 +452,7 @@ class Ns1Provider(BaseProvider):
client_config)
super(Ns1Provider, self).__init__(id, *args, **kwargs)
self.monitor_regions = monitor_regions
+ self.shared_notifylist = shared_notifylist
self._client = Ns1Client(api_key, parallelism, retry_count,
client_config)
@@ -406,7 +489,7 @@ class Ns1Provider(BaseProvider):
for piece in note.split(' '):
try:
k, v = piece.split(':', 1)
- data[k] = v
+ data[k] = v if v != '' else None
except ValueError:
pass
return data
@@ -464,10 +547,10 @@ class Ns1Provider(BaseProvider):
pass
return pool_name
- def _data_for_dynamic_A(self, _type, record):
+ def _data_for_dynamic(self, _type, record):
# First make sure we have the expected filters config
if not self._valid_filter_config(record['filters'], record['domain']):
- self.log.error('_data_for_dynamic_A: %s %s has unsupported '
+ self.log.error('_data_for_dynamic: %s %s has unsupported '
'filters', record['domain'], _type)
raise Ns1Exception('Unrecognized advanced record')
@@ -479,31 +562,45 @@ class Ns1Provider(BaseProvider):
# region.
pools = defaultdict(lambda: {'fallback': None, 'values': []})
for answer in record['answers']:
- # region (group name in the UI) is the pool name
- pool_name = answer['region']
- # Get the actual pool name by removing the type
- pool_name = self._parse_dynamic_pool_name(pool_name)
- pool = pools[pool_name]
-
meta = answer['meta']
+ notes = self._parse_notes(meta.get('note', ''))
+
value = text_type(answer['answer'][0])
- if meta['priority'] == 1:
- # priority 1 means this answer is part of the pools own values
- value_dict = {
- 'value': value,
- 'weight': int(meta.get('weight', 1)),
- }
- # If we have the original pool name and the catchall pool name
- # in the answers, they point at the same pool. Add values only
- # once
- if value_dict not in pool['values']:
- pool['values'].append(value_dict)
+ if notes.get('from', False) == '--default--':
+ # It's a final/default value, record it and move on
+ default.add(value)
+ continue
+
+ # NS1 pool names can be found in notes > v0.9.11, in order to allow
+ # us to find fallback-only pools/values. Before that we used
+ # `region` (group name in the UI) and only paid attention to
+ # priority=1 (first level)
+ notes_pool_name = notes.get('pool', None)
+ if notes_pool_name is None:
+ # < v0.9.11
+ if meta['priority'] != 1:
+ # Ignore all but priority 1
+ continue
+ # And use region's pool name as the pool name
+ pool_name = self._parse_dynamic_pool_name(answer['region'])
else:
- # It's a fallback, we only care about it if it's a
- # final/default
- notes = self._parse_notes(meta.get('note', ''))
- if notes.get('from', False) == '--default--':
- default.add(value)
+ # > v0.9.11, use the notes-based name and consider all values
+ pool_name = notes_pool_name
+
+ pool = pools[pool_name]
+ value_dict = {
+ 'value': value,
+ 'weight': int(meta.get('weight', 1)),
+ }
+ if value_dict not in pool['values']:
+ # If we haven't seen this value before add it to the pool
+ pool['values'].append(value_dict)
+
+ # If there's a fallback recorded in the value for its pool go ahead
+ # and use it, another v0.9.11 thing
+ fallback = notes.get('fallback', None)
+ if fallback is not None:
+ pool['fallback'] = fallback
# The regions objects map to rules, but it's a bit fuzzy since they're
# tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
@@ -528,55 +625,61 @@ class Ns1Provider(BaseProvider):
rules[rule_order] = rule
# The group notes field in the UI is a `note` on the region here,
- # that's where we can find our pool's fallback.
+ # that's where we can find our pool's fallback in < v0.9.11 anyway
if 'fallback' in notes:
# set the fallback pool name
pools[pool_name]['fallback'] = notes['fallback']
geos = set()
- # continents are mapped (imperfectly) to regions, but what about
- # Canada/North America
for georegion in meta.get('georegion', []):
geos.add(self._REGION_TO_CONTINENT[georegion])
# Countries are easy enough to map, we just have to find their
# continent
#
- # NOTE: Special handling for Oceania
- # NS1 doesn't support Oceania as a region. So the Oceania countries
- # will be present in meta['country']. If all the countries in the
- # Oceania countries list are found, set the region to OC and remove
- # individual oceania country entries
-
- oc_countries = set()
+ # NOTE: Some continents need special handling since NS1
+ # does not support them as regions. These are defined under
+ # _CONTINENT_TO_LIST_OF_COUNTRIES. So the countries for these
+ # regions will be present in meta['country']. If all the countries
+ # in _CONTINENT_TO_LIST_OF_COUNTRIES[] list are found,
+ # set the continent as the region and remove individual countries
+
+ special_continents = dict()
for country in meta.get('country', []):
- # country_alpha2_to_continent_code fails for Pitcairn ('PN')
+ # country_alpha2_to_continent_code fails for Pitcairn ('PN'),
+ # United States Minor Outlying Islands ('UM') and
+ # Sint Maarten ('SX')
if country == 'PN':
con = 'OC'
+ elif country in ['SX', 'UM']:
+ con = 'NA'
else:
con = country_alpha2_to_continent_code(country)
- if con == 'OC':
- oc_countries.add(country)
+ if con in self._CONTINENT_TO_LIST_OF_COUNTRIES:
+ special_continents.setdefault(con, set()).add(country)
else:
- # Adding only non-OC countries here to geos
geos.add('{}-{}'.format(con, country))
- if oc_countries:
- if oc_countries == self._CONTINENT_TO_LIST_OF_COUNTRIES['OC']:
- # All OC countries found, so add 'OC' to geos
- geos.add('OC')
+ for continent, countries in special_continents.items():
+ if countries == self._CONTINENT_TO_LIST_OF_COUNTRIES[
+ continent]:
+ # All countries found, so add it to geos
+ geos.add(continent)
else:
- # Partial OC countries found, just add them as-is to geos
- for c in oc_countries:
- geos.add('{}-{}'.format('OC', c))
+ # Partial countries found, so just add them as-is to geos
+ for c in countries:
+ geos.add('{}-{}'.format(continent, c))
- # States are easy too, just assume NA-US (CA providences aren't
- # supported by octoDNS currently)
+ # States and provinces are easy too,
+ # just assume NA-US or NA-CA
for state in meta.get('us_state', []):
geos.add('NA-US-{}'.format(state))
+ for province in meta.get('ca_province', []):
+ geos.add('NA-CA-{}'.format(province))
+
if geos:
# There are geos, combine them with any existing geos for this
# pool and recorded the sorted unique set of them
@@ -588,16 +691,22 @@ class Ns1Provider(BaseProvider):
rules = list(rules.values())
rules.sort(key=lambda r: (r['_order'], r['pool']))
- return {
+ data = {
'dynamic': {
'pools': pools,
'rules': rules,
},
'ttl': record['ttl'],
'type': _type,
- 'values': sorted(default),
}
+ if _type == 'CNAME':
+ data['value'] = default[0]
+ else:
+ data['values'] = default
+
+ return data
+
def _data_for_A(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced record, see if it's first answer has a note
@@ -607,7 +716,7 @@ class Ns1Provider(BaseProvider):
first_answer_note = ''
# If that note includes a `from` (pool name) it's a dynamic record
if 'from:' in first_answer_note:
- return self._data_for_dynamic_A(_type, record)
+ return self._data_for_dynamic(_type, record)
# If not it's an old geo record
return self._data_for_geo_A(_type, record)
@@ -646,6 +755,10 @@ class Ns1Provider(BaseProvider):
}
def _data_for_CNAME(self, _type, record):
+ if record.get('tier', 1) > 1:
+ # Advanced dynamic record
+ return self._data_for_dynamic(_type, record)
+
try:
value = record['short_answers'][0]
except IndexError:
@@ -657,7 +770,6 @@ class Ns1Provider(BaseProvider):
}
_data_for_ALIAS = _data_for_CNAME
- _data_for_PTR = _data_for_CNAME
def _data_for_MX(self, _type, record):
values = []
@@ -696,10 +808,11 @@ class Ns1Provider(BaseProvider):
return {
'ttl': record['ttl'],
'type': _type,
- 'values': [a if a.endswith('.') else '{}.'.format(a)
- for a in record['short_answers']],
+ 'values': record['short_answers'],
}
+ _data_for_PTR = _data_for_NS
+
def _data_for_SRV(self, _type, record):
values = []
for answer in record['short_answers']:
@@ -716,6 +829,23 @@ class Ns1Provider(BaseProvider):
'values': values,
}
+ def _data_for_URLFWD(self, _type, record):
+ values = []
+ for answer in record['short_answers']:
+ path, target, code, masking, query = answer.split(' ', 4)
+ values.append({
+ 'path': path,
+ 'target': target,
+ 'code': code,
+ 'masking': masking,
+ 'query': query,
+ })
+ return {
+ 'ttl': record['ttl'],
+ 'type': _type,
+ 'values': values,
+ }
+
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s',
zone.name,
@@ -732,9 +862,10 @@ class Ns1Provider(BaseProvider):
for record in ns1_zone['records']:
if record['type'] in ['ALIAS', 'CNAME', 'MX', 'NS', 'PTR',
'SRV']:
- for i, a in enumerate(record['short_answers']):
- if not a.endswith('.'):
- record['short_answers'][i] = '{}.'.format(a)
+ record['short_answers'] = [
+ _ensure_endswith_dot(a)
+ for a in record['short_answers']
+ ]
if record.get('tier', 1) > 1:
# Need to get the full record data for geo records
@@ -817,11 +948,17 @@ class Ns1Provider(BaseProvider):
for monitor in self._client.monitors.values():
data = self._parse_notes(monitor['notes'])
+ if not data:
+ continue
if expected_host == data['host'] and \
expected_type == data['type']:
# This monitor does not belong to this record
config = monitor['config']
value = config['host']
+ if record._type == 'CNAME':
+ # Append a trailing dot for CNAME records so that
+ # lookup by a CNAME answer works
+ value = value + '.'
monitors[value] = monitor
return monitors
@@ -832,7 +969,6 @@ class Ns1Provider(BaseProvider):
def _feed_create(self, monitor):
monitor_id = monitor['id']
self.log.debug('_feed_create: monitor=%s', monitor_id)
- # TODO: looks like length limit is 64 char
name = '{} - {}'.format(monitor['name'], self._uuid()[:6])
# Create the data feed
@@ -846,22 +982,36 @@ class Ns1Provider(BaseProvider):
return feed_id
+ def _notifylists_find_or_create(self, name):
+ self.log.debug('_notifylists_find_or_create: name="%s"', name)
+ try:
+ nl = self._client.notifylists[name]
+ self.log.debug('_notifylists_find_or_create: existing=%s',
+ nl['id'])
+ except KeyError:
+ notify_list = [{
+ 'config': {
+ 'sourceid': self._client.datasource_id,
+ },
+ 'type': 'datafeed',
+ }]
+ nl = self._client.notifylists_create(name=name,
+ notify_list=notify_list)
+ self.log.debug('_notifylists_find_or_create: created=%s',
+ nl['id'])
+
+ return nl
+
def _monitor_create(self, monitor):
self.log.debug('_monitor_create: monitor="%s"', monitor['name'])
- # Create the notify list
- notify_list = [{
- 'config': {
- 'sourceid': self._client.datasource_id,
- },
- 'type': 'datafeed',
- }]
- nl = self._client.notifylists_create(name=monitor['name'],
- notify_list=notify_list)
- nl_id = nl['id']
- self.log.debug('_monitor_create: notify_list=%s', nl_id)
+
+ # Find the right notifylist
+ nl_name = self.SHARED_NOTIFYLIST_NAME \
+ if self.shared_notifylist else monitor['name']
+ nl = self._notifylists_find_or_create(nl_name)
# Create the monitor
- monitor['notify_list'] = nl_id
+ monitor['notify_list'] = nl['id']
monitor = self._client.monitors_create(**monitor)
monitor_id = monitor['id']
self.log.debug('_monitor_create: monitor=%s', monitor_id)
@@ -872,6 +1022,10 @@ class Ns1Provider(BaseProvider):
host = record.fqdn[:-1]
_type = record._type
+ if _type == 'CNAME':
+ # NS1 does not accept a host value with a trailing dot
+ value = value[:-1]
+
ret = {
'active': True,
'config': {
@@ -894,10 +1048,13 @@ class Ns1Provider(BaseProvider):
'regions': self.monitor_regions,
}
+ if _type == 'AAAA':
+ ret['config']['ipv6'] = True
+
if record.healthcheck_protocol != 'TCP':
# IF it's HTTP we need to send the request string
path = record.healthcheck_path
- host = record.healthcheck_host
+ host = record.healthcheck_host(value=value)
request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \
r'User-agent: NS1\r\n\r\n'.format(path=path, host=host)
ret['config']['send'] = request
@@ -968,7 +1125,13 @@ class Ns1Provider(BaseProvider):
self._client.monitors_delete(monitor_id)
notify_list_id = monitor['notify_list']
- self._client.notifylists_delete(notify_list_id)
+ for nl_name, nl in self._client.notifylists.items():
+ if nl['id'] == notify_list_id:
+ # We've found the one that might need deleting
+ if nl['name'] != self.SHARED_NOTIFYLIST_NAME:
+ # It's not shared so is safe to delete
+ self._client.notifylists_delete(notify_list_id)
+ break
def _add_answers_for_pool(self, answers, default_answers, pool_name,
pool_label, pool_answers, pools, priority):
@@ -978,12 +1141,15 @@ class Ns1Provider(BaseProvider):
seen.add(current_pool_name)
pool = pools[current_pool_name]
for answer in pool_answers[current_pool_name]:
+ fallback = pool.data['fallback']
answer = {
'answer': answer['answer'],
'meta': {
'priority': priority,
'note': self._encode_notes({
'from': pool_label,
+ 'pool': current_pool_name,
+ 'fallback': fallback or '',
}),
'up': {
'feed': answer['feed_id'],
@@ -1013,7 +1179,7 @@ class Ns1Provider(BaseProvider):
}
answers.append(answer)
- def _params_for_dynamic_A(self, record):
+ def _params_for_dynamic(self, record):
pools = record.dynamic.pools
# Convert rules to regions
@@ -1035,12 +1201,15 @@ class Ns1Provider(BaseProvider):
country = set()
georegion = set()
us_state = set()
+ ca_province = set()
for geo in rule.data.get('geos', []):
n = len(geo)
if n == 8:
# US state, e.g. NA-US-KY
- us_state.add(geo[-2:])
+ # CA province, e.g. NA-CA-NL
+ us_state.add(geo[-2:]) if "NA-US" in geo \
+ else ca_province.add(geo[-2:])
# For filtering. State filtering is done by the country
# filter
has_country = True
@@ -1073,7 +1242,7 @@ class Ns1Provider(BaseProvider):
'meta': georegion_meta,
}
- if country or us_state:
+ if country or us_state or ca_province:
# If there's country and/or states its a country pool,
# countries and states can coexist as they're handled by the
# same step in the filterchain (countries and georegions
@@ -1084,11 +1253,12 @@ class Ns1Provider(BaseProvider):
country_state_meta['country'] = sorted(country)
if us_state:
country_state_meta['us_state'] = sorted(us_state)
+ if ca_province:
+ country_state_meta['ca_province'] = sorted(ca_province)
regions['{}__country'.format(pool_name)] = {
'meta': country_state_meta,
}
-
- if not georegion and not country and not us_state:
+ elif not georegion:
# If there's no targeting it's a catchall
regions['{}__catchall'.format(pool_name)] = {
'meta': meta,
@@ -1099,25 +1269,35 @@ class Ns1Provider(BaseProvider):
# Build a list of primary values for each pool, including their
# feed_id (monitor)
+ value_feed = dict()
pool_answers = defaultdict(list)
for pool_name, pool in sorted(pools.items()):
for value in pool.data['values']:
weight = value['weight']
value = value['value']
- existing = existing_monitors.get(value)
- monitor_id, feed_id = self._monitor_sync(record, value,
- existing)
- active_monitors.add(monitor_id)
+ feed_id = value_feed.get(value)
+ # check for identical monitor and skip creating one if found
+ if not feed_id:
+ existing = existing_monitors.get(value)
+ monitor_id, feed_id = self._monitor_sync(record, value,
+ existing)
+ value_feed[value] = feed_id
+ active_monitors.add(monitor_id)
+
pool_answers[pool_name].append({
'answer': [value],
'weight': weight,
'feed_id': feed_id,
})
+ if record._type == 'CNAME':
+ default_values = [record.value]
+ else:
+ default_values = record.values
default_answers = [{
'answer': [v],
'weight': 1,
- } for v in record.values]
+ } for v in default_values]
# Build our list of answers
# The regions dictionary built above already has the required pool
@@ -1146,7 +1326,7 @@ class Ns1Provider(BaseProvider):
def _params_for_A(self, record):
if getattr(record, 'dynamic', False):
- return self._params_for_dynamic_A(record)
+ return self._params_for_dynamic(record)
elif hasattr(record, 'geo'):
return self._params_for_geo_A(record)
@@ -1171,12 +1351,13 @@ class Ns1Provider(BaseProvider):
values = [(v.flags, v.tag, v.value) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
- # TODO: dynamic CNAME support
def _params_for_CNAME(self, record):
+ if getattr(record, 'dynamic', False):
+ return self._params_for_dynamic(record)
+
return {'answers': [record.value], 'ttl': record.ttl}, None
_params_for_ALIAS = _params_for_CNAME
- _params_for_PTR = _params_for_CNAME
def _params_for_MX(self, record):
values = [(v.preference, v.exchange) for v in record.values]
@@ -1187,11 +1368,22 @@ class Ns1Provider(BaseProvider):
v.replacement) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
+ def _params_for_PTR(self, record):
+ return {
+ 'answers': record.values,
+ 'ttl': record.ttl,
+ }, None
+
def _params_for_SRV(self, record):
values = [(v.priority, v.weight, v.port, v.target)
for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None
+ def _params_for_URLFWD(self, record):
+ values = [(v.path, v.target, v.code, v.masking, v.query)
+ for v in record.values]
+ return {'answers': values, 'ttl': record.ttl}, None
+
def _get_ns1_filters(self, ns1_zone_name):
ns1_filters = {}
ns1_zone = {}
@@ -1250,8 +1442,7 @@ class Ns1Provider(BaseProvider):
extra.append(Update(record, record))
continue
- for have in self._monitors_for(record).values():
- value = have['config']['host']
+ for value, have in self._monitors_for(record).items():
expected = self._monitor_gen(record, value)
# TODO: find values which have missing monitors
if not self._monitor_is_match(expected, have):
@@ -1285,7 +1476,9 @@ class Ns1Provider(BaseProvider):
params, active_monitor_ids = \
getattr(self, '_params_for_{}'.format(_type))(new)
self._client.records_update(zone, domain, _type, **params)
- self._monitors_gc(new, active_monitor_ids)
+ # If we're cleaning up we need to send in the old record since it'd
+ # have anything that needs cleaning up
+ self._monitors_gc(change.existing, active_monitor_ids)
def _apply_Delete(self, ns1_zone, change):
existing = change.existing
diff --git a/octodns/provider/ovh.py b/octodns/provider/ovh.py
index 54f62ac..9f7cd9a 100644
--- a/octodns/provider/ovh.py
+++ b/octodns/provider/ovh.py
@@ -370,11 +370,15 @@ class OvhProvider(BaseProvider):
@staticmethod
def _is_valid_dkim_key(key):
+ result = True
+ base64_decode = getattr(base64, 'decodestring', None)
+ base64_decode = getattr(base64, 'decodebytes', base64_decode)
+
try:
- base64.decodestring(bytearray(key, 'utf-8'))
+ result = base64_decode(bytearray(key, 'utf-8'))
except binascii.Error:
- return False
- return True
+ result = False
+ return result
def get_records(self, zone_name):
"""
diff --git a/octodns/provider/plan.py b/octodns/provider/plan.py
index af6863a..69bd2b2 100644
--- a/octodns/provider/plan.py
+++ b/octodns/provider/plan.py
@@ -50,7 +50,7 @@ class Plan(object):
except AttributeError:
existing_n = 0
- self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d'
+ self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d '
'Existing=%d',
self.change_counts['Create'],
self.change_counts['Update'],
diff --git a/octodns/provider/powerdns.py b/octodns/provider/powerdns.py
index bcb6980..0e4a5d9 100644
--- a/octodns/provider/powerdns.py
+++ b/octodns/provider/powerdns.py
@@ -6,6 +6,7 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from requests import HTTPError, Session
+from operator import itemgetter
import logging
from ..record import Create, Record
@@ -15,8 +16,8 @@ from .base import BaseProvider
class PowerDnsBaseProvider(BaseProvider):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
- SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS',
- 'PTR', 'SPF', 'SSHFP', 'SRV', 'TXT'))
+ SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'LOC', 'MX', 'NAPTR',
+ 'NS', 'PTR', 'SPF', 'SSHFP', 'SRV', 'TXT'))
TIMEOUT = 5
def __init__(self, id, host, api_key, port=8081,
@@ -102,6 +103,33 @@ class PowerDnsBaseProvider(BaseProvider):
_data_for_SPF = _data_for_quoted
_data_for_TXT = _data_for_quoted
+ def _data_for_LOC(self, rrset):
+ values = []
+ for record in rrset['records']:
+ lat_degrees, lat_minutes, lat_seconds, lat_direction, \
+ long_degrees, long_minutes, long_seconds, long_direction, \
+ altitude, size, precision_horz, precision_vert = \
+ record['content'].replace('m', '').split(' ', 11)
+ values.append({
+ 'lat_degrees': int(lat_degrees),
+ 'lat_minutes': int(lat_minutes),
+ 'lat_seconds': float(lat_seconds),
+ 'lat_direction': lat_direction,
+ 'long_degrees': int(long_degrees),
+ 'long_minutes': int(long_minutes),
+ 'long_seconds': float(long_seconds),
+ 'long_direction': long_direction,
+ 'altitude': float(altitude),
+ 'size': float(size),
+ 'precision_horz': float(precision_horz),
+ 'precision_vert': float(precision_vert),
+ })
+ return {
+ 'ttl': rrset['ttl'],
+ 'type': rrset['type'],
+ 'values': values
+ }
+
def _data_for_MX(self, rrset):
values = []
for record in rrset['records']:
@@ -183,7 +211,10 @@ class PowerDnsBaseProvider(BaseProvider):
version = resp.json()['version']
self.log.debug('powerdns_version: got version %s from server',
version)
- self._powerdns_version = [int(p) for p in version.split('.')]
+ # The extra `-` split is to handle pre-release and source built
+ # versions like 4.5.0-alpha0.435.master.gcb114252b
+ self._powerdns_version = [
+ int(p.split('-')[0]) for p in version.split('.')[:3]]
return self._powerdns_version
@@ -282,6 +313,27 @@ class PowerDnsBaseProvider(BaseProvider):
_records_for_SPF = _records_for_quoted
_records_for_TXT = _records_for_quoted
+ def _records_for_LOC(self, record):
+ return [{
+ 'content':
+ '%d %d %0.3f %s %d %d %.3f %s %0.2fm %0.2fm %0.2fm %0.2fm' %
+ (
+ int(v.lat_degrees),
+ int(v.lat_minutes),
+ float(v.lat_seconds),
+ v.lat_direction,
+ int(v.long_degrees),
+ int(v.long_minutes),
+ float(v.long_seconds),
+ v.long_direction,
+ float(v.altitude),
+ float(v.size),
+ float(v.precision_horz),
+ float(v.precision_vert)
+ ),
+ 'disabled': False
+ } for v in record.values]
+
def _records_for_MX(self, record):
return [{
'content': '{} {}'.format(v.preference, v.exchange),
@@ -378,6 +430,12 @@ class PowerDnsBaseProvider(BaseProvider):
for change in changes:
class_name = change.__class__.__name__
mods.append(getattr(self, '_mod_{}'.format(class_name))(change))
+
+ # Ensure that any DELETE modifications always occur before any REPLACE
+ # modifications. This ensures that an A record can be replaced by a
+ # CNAME record and vice-versa.
+ mods.sort(key=itemgetter('changetype'))
+
self.log.debug('_apply: sending change request')
try:
diff --git a/octodns/provider/route53.py b/octodns/provider/route53.py
index 0d5bab9..0ef60bc 100644
--- a/octodns/provider/route53.py
+++ b/octodns/provider/route53.py
@@ -19,6 +19,7 @@ from six import text_type
from ..equality import EqualityTupleMixin
from ..record import Record, Update
from ..record.geo import GeoCodes
+from . import ProviderException
from .base import BaseProvider
octal_re = re.compile(r'\\(\d\d\d)')
@@ -512,7 +513,7 @@ class _Route53GeoRecord(_Route53Record):
self.values)
-class Route53ProviderException(Exception):
+class Route53ProviderException(ProviderException):
pass
@@ -924,6 +925,43 @@ class Route53Provider(BaseProvider):
return data
+ def _process_desired_zone(self, desired):
+ for record in desired.records:
+ if getattr(record, 'dynamic', False):
+ # Make a copy of the record in case we have to muck with it
+ dynamic = record.dynamic
+ rules = []
+ for i, rule in enumerate(dynamic.rules):
+ geos = rule.data.get('geos', [])
+ if not geos:
+ rules.append(rule)
+ continue
+ filtered_geos = [g for g in geos
+ if not g.startswith('NA-CA-')]
+ if not filtered_geos:
+ # We've removed all geos, we'll have to skip this rule
+ msg = 'NA-CA-* not supported for {}' \
+ .format(record.fqdn)
+ fallback = 'skipping rule {}'.format(i)
+ self.supports_warn_or_except(msg, fallback)
+ continue
+ elif geos != filtered_geos:
+ msg = 'NA-CA-* not supported for {}' \
+ .format(record.fqdn)
+ fallback = 'filtering rule {} from ({}) to ({})' \
+ .format(i, ', '.join(geos),
+ ', '.join(filtered_geos))
+ self.supports_warn_or_except(msg, fallback)
+ rule.data['geos'] = filtered_geos
+ rules.append(rule)
+
+ if rules != dynamic.rules:
+ record = record.copy()
+ record.dynamic.rules = rules
+ desired.add_record(record, replace=True)
+
+ return super(Route53Provider, self)._process_desired_zone(desired)
+
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
@@ -1051,10 +1089,11 @@ class Route53Provider(BaseProvider):
health_check, value=None):
config = health_check['HealthCheckConfig']
- # So interestingly Route53 normalizes IPAddress which will cause us to
- # fail to find see things as equivalent. To work around this we'll
- # ip_address's returned object for equivalence
- # E.g 2001:4860:4860::8842 -> 2001:4860:4860:0:0:0:0:8842
+ # So interestingly Route53 normalizes IPv6 addresses to a funky, but
+ # valid, form which will cause us to fail to see things as
+ # equivalent. To work around this we'll compare ip_address's returned
+ # objects for equivalence.
+ # E.g 2001:4860:4860:0:0:0:0:8842 -> 2001:4860:4860::8842
if value:
value = ip_address(text_type(value))
config_ip_address = ip_address(text_type(config['IPAddress']))
@@ -1084,7 +1123,7 @@ class Route53Provider(BaseProvider):
try:
ip_address(text_type(value))
# We're working with an IP, host is the Host header
- healthcheck_host = record.healthcheck_host
+ healthcheck_host = record.healthcheck_host(value=value)
except (AddressValueError, ValueError):
# This isn't an IP, host is the value, value should be None
healthcheck_host = value
@@ -1253,7 +1292,12 @@ class Route53Provider(BaseProvider):
return self._gen_mods('DELETE', existing_records, existing_rrsets)
def _extra_changes_update_needed(self, record, rrset):
- healthcheck_host = record.healthcheck_host
+ if record._type == 'CNAME':
+ # For CNAME, healthcheck host by default points to the CNAME value
+ healthcheck_host = rrset['ResourceRecords'][0]['Value']
+ else:
+ healthcheck_host = record.healthcheck_host()
+
healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
diff --git a/octodns/provider/selectel.py b/octodns/provider/selectel.py
index b9a99aa..87f8d4f 100644
--- a/octodns/provider/selectel.py
+++ b/octodns/provider/selectel.py
@@ -12,6 +12,7 @@ from logging import getLogger
from requests import Session
from ..record import Record, Update
+from . import ProviderException
from .base import BaseProvider
@@ -20,7 +21,7 @@ def escape_semicolon(s):
return s.replace(';', '\\;')
-class SelectelAuthenticationRequired(Exception):
+class SelectelAuthenticationRequired(ProviderException):
def __init__(self, msg):
message = 'Authorization failed. Invalid or empty token.'
super(SelectelAuthenticationRequired, self).__init__(message)
diff --git a/octodns/provider/transip.py b/octodns/provider/transip.py
index 7458e36..2bdedc4 100644
--- a/octodns/provider/transip.py
+++ b/octodns/provider/transip.py
@@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function, \
from suds import WebFault
from collections import defaultdict
+from . import ProviderException
from .base import BaseProvider
from logging import getLogger
from ..record import Record
@@ -15,7 +16,7 @@ from transip.service.domain import DomainService
from transip.service.objects import DnsEntry
-class TransipException(Exception):
+class TransipException(ProviderException):
pass
@@ -49,8 +50,8 @@ class TransipProvider(BaseProvider):
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
- SUPPORTS = set(
- ('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'SPF', 'TXT', 'SSHFP', 'CAA'))
+ SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'SPF', 'TXT',
+ 'SSHFP', 'CAA'))
# unsupported by OctoDNS: 'TLSA'
MIN_TTL = 120
TIMEOUT = 15
diff --git a/octodns/provider/ultra.py b/octodns/provider/ultra.py
index eb10e0d..e382a33 100644
--- a/octodns/provider/ultra.py
+++ b/octodns/provider/ultra.py
@@ -1,13 +1,13 @@
from collections import defaultdict
-from ipaddress import ip_address
from logging import getLogger
from requests import Session
from ..record import Record
+from . import ProviderException
from .base import BaseProvider
-class UltraClientException(Exception):
+class UltraClientException(ProviderException):
'''
Base Ultra exception type
'''
@@ -36,12 +36,12 @@ class UltraProvider(BaseProvider):
'''
Neustar UltraDNS provider
- Documentation for Ultra REST API requires a login:
- https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf
- Implemented to the May 20, 2020 version of the document (dated on page ii)
- Also described as Version 2.83.0 (title page)
+ Documentation for Ultra REST API:
+ https://ultra-portalstatic.ultradns.com/static/docs/REST-API_User_Guide.pdf
+ Implemented to the May 26, 2021 version of the document (dated on page ii)
+ Also described as Version 3.18.0 (title page)
- Tested against 3.0.0-20200627220036.81047f5
+ Tested against 3.20.1-20210521075351.36b9297
As determined by querying https://api.ultradns.com/version
ultra:
@@ -57,6 +57,7 @@ class UltraProvider(BaseProvider):
RECORDS_TO_TYPE = {
'A (1)': 'A',
'AAAA (28)': 'AAAA',
+ 'APEXALIAS (65282)': 'ALIAS',
'CAA (257)': 'CAA',
'CNAME (5)': 'CNAME',
'MX (15)': 'MX',
@@ -72,6 +73,7 @@ class UltraProvider(BaseProvider):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
TIMEOUT = 5
+ ZONE_REQUEST_LIMIT = 100
def _request(self, method, path, params=None,
data=None, json=None, json_response=True):
@@ -151,7 +153,7 @@ class UltraProvider(BaseProvider):
def zones(self):
if self._zones is None:
offset = 0
- limit = 100
+ limit = self.ZONE_REQUEST_LIMIT
zones = []
paging = True
while paging:
@@ -194,8 +196,6 @@ class UltraProvider(BaseProvider):
}
def _data_for_AAAA(self, _type, records):
- for i, v in enumerate(records['rdata']):
- records['rdata'][i] = str(ip_address(v))
return {
'ttl': records['ttl'],
'type': _type,
@@ -211,6 +211,7 @@ class UltraProvider(BaseProvider):
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
+ _data_for_ALIAS = _data_for_single
def _data_for_CAA(self, _type, records):
return {
@@ -287,7 +288,13 @@ class UltraProvider(BaseProvider):
name = zone.hostname_from_fqdn(record['ownerName'])
if record['rrtype'] == 'SOA (6)':
continue
- _type = self.RECORDS_TO_TYPE[record['rrtype']]
+ try:
+ _type = self.RECORDS_TO_TYPE[record['rrtype']]
+ except KeyError:
+ self.log.warning('populate: ignoring record with '
+ 'unsupported rrtype, %s %s',
+ name, record['rrtype'])
+ continue
values[name][_type] = record
for name, types in values.items():
@@ -368,6 +375,7 @@ class UltraProvider(BaseProvider):
}
_contents_for_PTR = _contents_for_CNAME
+ _contents_for_ALIAS = _contents_for_CNAME
def _contents_for_SRV(self, record):
return {
@@ -395,8 +403,15 @@ class UltraProvider(BaseProvider):
def _gen_data(self, record):
zone_name = self._remove_prefix(record.fqdn, record.name + '.')
+
+ # UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
+ if record._type == "ALIAS":
+ record_type = "APEXALIAS"
+ else:
+ record_type = record._type
+
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
- record._type,
+ record_type,
record.fqdn)
contents_for = getattr(self, '_contents_for_{}'.format(record._type))
return path, contents_for(record)
@@ -438,7 +453,13 @@ class UltraProvider(BaseProvider):
existing._type == self.RECORDS_TO_TYPE[record['rrtype']]:
zone_name = self._remove_prefix(existing.fqdn,
existing.name + '.')
+
+ # UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
+ existing_type = existing._type
+ if existing_type == "ALIAS":
+ existing_type = "APEXALIAS"
+
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
- existing._type,
+ existing_type,
existing.fqdn)
self._delete(path, json_response=False)
diff --git a/octodns/provider/yaml.py b/octodns/provider/yaml.py
index 10add5a..803bbd4 100644
--- a/octodns/provider/yaml.py
+++ b/octodns/provider/yaml.py
@@ -104,8 +104,10 @@ class YamlProvider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
- SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR', 'NS',
- 'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT'))
+ SUPPORTS_MUTLIVALUE_PTR = True
+ SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'DNAME', 'LOC', 'MX',
+ 'NAPTR', 'NS', 'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT',
+ 'URLFWD'))
def __init__(self, id, directory, default_ttl=3600, enforce_order=True,
populate_should_replace=False, *args, **kwargs):
@@ -239,11 +241,13 @@ class SplitYamlProvider(YamlProvider):
# instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
- def __init__(self, id, directory, *args, **kwargs):
+ def __init__(self, id, directory, extension='.', *args, **kwargs):
super(SplitYamlProvider, self).__init__(id, directory, *args, **kwargs)
+ self.extension = extension
def _zone_directory(self, zone):
- return join(self.directory, zone.name)
+ filename = '{}{}'.format(zone.name[:-1], self.extension)
+ return join(self.directory, filename)
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
diff --git a/octodns/record/__init__.py b/octodns/record/__init__.py
index 849e035..a8dd834 100644
--- a/octodns/record/__init__.py
+++ b/octodns/record/__init__.py
@@ -10,6 +10,7 @@ from logging import getLogger
import re
from six import string_types, text_type
+from fqdn import FQDN
from ..equality import EqualityTupleMixin
from .geo import GeoCodes
@@ -95,6 +96,8 @@ class Record(EqualityTupleMixin):
'ALIAS': AliasRecord,
'CAA': CaaRecord,
'CNAME': CnameRecord,
+ 'DNAME': DnameRecord,
+ 'LOC': LocRecord,
'MX': MxRecord,
'NAPTR': NaptrRecord,
'NS': NsRecord,
@@ -103,6 +106,7 @@ class Record(EqualityTupleMixin):
'SRV': SrvRecord,
'SSHFP': SshfpRecord,
'TXT': TxtRecord,
+ 'URLFWD': UrlfwdRecord,
}[_type]
except KeyError:
raise Exception('Unknown record type: "{}"'.format(_type))
@@ -125,10 +129,11 @@ class Record(EqualityTupleMixin):
if n > 253:
reasons.append('invalid fqdn, "{}" is too long at {} chars, max '
'is 253'.format(fqdn, n))
- n = len(name)
- if n > 63:
- reasons.append('invalid name, "{}" is too long at {} chars, max '
- 'is 63'.format(name, n))
+ for label in name.split('.'):
+ n = len(label)
+ if n > 63:
+ reasons.append('invalid label, "{}" is too long at {} chars, '
+ 'max is 63'.format(label, n))
try:
ttl = int(data['ttl'])
if ttl < 0:
@@ -179,15 +184,11 @@ class Record(EqualityTupleMixin):
def included(self):
return self._octodns.get('included', [])
- @property
- def healthcheck_host(self):
+ def healthcheck_host(self, value=None):
healthcheck = self._octodns.get('healthcheck', {})
if healthcheck.get('protocol', None) == 'TCP':
return None
- try:
- return healthcheck['host']
- except KeyError:
- return self.fqdn[:-1]
+ return healthcheck.get('host', self.fqdn[:-1]) or value
@property
def healthcheck_path(self):
@@ -218,6 +219,18 @@ class Record(EqualityTupleMixin):
if self.ttl != other.ttl:
return Update(self, other)
+ def copy(self, zone=None):
+ data = self.data
+ data['type'] = self._type
+
+ return Record.new(
+ zone if zone else self.zone,
+ self.name,
+ data,
+ self.source,
+ lenient=True
+ )
+
# NOTE: we're using __hash__ and ordering methods that consider Records
# equivalent if they have the same name & _type. Values are ignored. This
# is useful when computing diffs/changes.
@@ -401,6 +414,7 @@ class _ValueMixin(object):
class _DynamicPool(object):
+ log = getLogger('_DynamicPool')
def __init__(self, _id, data):
self._id = _id
@@ -413,6 +427,15 @@ class _DynamicPool(object):
]
values.sort(key=lambda d: d['value'])
+ # normalize weight of a single-value pool
+ if len(values) == 1:
+ weight = data['values'][0].get('weight', 1)
+ if weight != 1:
+ self.log.warn(
+ 'Using weight=1 instead of %s for single-value pool %s',
+ weight, _id)
+ values[0]['weight'] = 1
+
fallback = data.get('fallback', None)
self.data = {
'fallback': fallback if fallback != 'default' else None,
@@ -515,6 +538,7 @@ class _DynamicMixin(object):
pools_exist = set()
pools_seen = set()
+ pools_seen_as_fallback = set()
if not isinstance(pools, dict):
reasons.append('pools must be a dict')
elif not pools:
@@ -556,10 +580,17 @@ class _DynamicMixin(object):
reasons.append('missing value in pool "{}" '
'value {}'.format(_id, value_num))
+ if len(values) == 1 and values[0].get('weight', 1) != 1:
+ reasons.append('pool "{}" has single value with '
+ 'weight!=1'.format(_id))
+
fallback = pool.get('fallback', None)
- if fallback is not None and fallback not in pools:
- reasons.append('undefined fallback "{}" for pool "{}"'
- .format(fallback, _id))
+ if fallback is not None:
+ if fallback in pools:
+ pools_seen_as_fallback.add(fallback)
+ else:
+ reasons.append('undefined fallback "{}" for pool "{}"'
+ .format(fallback, _id))
# Check for loops
fallback = pools[_id].get('fallback', None)
@@ -587,7 +618,6 @@ class _DynamicMixin(object):
else:
seen_default = False
- # TODO: don't allow 'default' as a pool name, reserved
for i, rule in enumerate(rules):
rule_num = i + 1
try:
@@ -608,7 +638,6 @@ class _DynamicMixin(object):
if pool not in pools:
reasons.append('rule {} undefined pool "{}"'
.format(rule_num, pool))
- pools_seen.add(pool)
elif pool in pools_seen and geos:
reasons.append('rule {} invalid, target pool "{}" '
'reused'.format(rule_num, pool))
@@ -628,7 +657,7 @@ class _DynamicMixin(object):
reasons.extend(GeoCodes.validate(geo, 'rule {} '
.format(rule_num)))
- unused = pools_exist - pools_seen
+ unused = pools_exist - pools_seen - pools_seen_as_fallback
if unused:
unused = '", "'.join(sorted(unused))
reasons.append('unused pools: "{}"'.format(unused))
@@ -720,8 +749,13 @@ class _IpList(object):
@classmethod
def process(cls, values):
- # Translating None into '' so that the list will be sortable in python3
- return [v if v is not None else '' for v in values]
+ # Translating None into '' so that the list will be sortable in
+ # python3, get everything to str first
+ values = [text_type(v) if v is not None else '' for v in values]
+ # Now round trip all non-'' through the address type and back to a str
+ # to normalize the address representation.
+ return [text_type(cls._address_type(v)) if v != '' else ''
+ for v in values]
class Ipv4List(_IpList):
@@ -743,6 +777,11 @@ class _TargetValue(object):
reasons.append('empty value')
elif not data:
reasons.append('missing value')
+ # NOTE: FQDN complains if the data it receives isn't a str, it doesn't
+ # allow unicode... This is likely specific to 2.7
+ elif not FQDN(str(data), allow_underscores=True).is_valid:
+ reasons.append('{} value "{}" is not a valid FQDN'
+ .format(_type, data))
elif not data.endswith('.'):
reasons.append('{} value "{}" missing trailing .'
.format(_type, data))
@@ -759,6 +798,10 @@ class CnameValue(_TargetValue):
pass
+class DnameValue(_TargetValue):
+ pass
+
+
class ARecord(_DynamicMixin, _GeoMixin, Record):
_type = 'A'
_value_type = Ipv4List
@@ -777,6 +820,14 @@ class AliasRecord(_ValueMixin, Record):
_type = 'ALIAS'
_value_type = AliasValue
+ @classmethod
+ def validate(cls, name, fqdn, data):
+ reasons = []
+ if name != '':
+ reasons.append('non-root ALIAS not allowed')
+ reasons.extend(super(AliasRecord, cls).validate(name, fqdn, data))
+ return reasons
+
class CaaValue(EqualityTupleMixin):
# https://tools.ietf.org/html/rfc6844#page-5
@@ -842,6 +893,200 @@ class CnameRecord(_DynamicMixin, _ValueMixin, Record):
return reasons
+class DnameRecord(_DynamicMixin, _ValueMixin, Record):
+ _type = 'DNAME'
+ _value_type = DnameValue
+
+
+class LocValue(EqualityTupleMixin):
+ # TODO: work out how to do defaults per RFC
+
+ @classmethod
+ def validate(cls, data, _type):
+ int_keys = [
+ 'lat_degrees',
+ 'lat_minutes',
+ 'long_degrees',
+ 'long_minutes',
+ ]
+
+ float_keys = [
+ 'lat_seconds',
+ 'long_seconds',
+ 'altitude',
+ 'size',
+ 'precision_horz',
+ 'precision_vert',
+ ]
+
+ direction_keys = [
+ 'lat_direction',
+ 'long_direction',
+ ]
+
+ if not isinstance(data, (list, tuple)):
+ data = (data,)
+ reasons = []
+ for value in data:
+ for key in int_keys:
+ try:
+ int(value[key])
+ if (
+ (
+ key == 'lat_degrees' and
+ not 0 <= int(value[key]) <= 90
+ ) or (
+ key == 'long_degrees' and
+ not 0 <= int(value[key]) <= 180
+ ) or (
+ key in ['lat_minutes', 'long_minutes'] and
+ not 0 <= int(value[key]) <= 59
+ )
+ ):
+ reasons.append('invalid value for {} "{}"'
+ .format(key, value[key]))
+ except KeyError:
+ reasons.append('missing {}'.format(key))
+ except ValueError:
+ reasons.append('invalid {} "{}"'
+ .format(key, value[key]))
+
+ for key in float_keys:
+ try:
+ float(value[key])
+ if (
+ (
+ key in ['lat_seconds', 'long_seconds'] and
+ not 0 <= float(value[key]) <= 59.999
+ ) or (
+ key == 'altitude' and
+ not -100000.00 <= float(value[key]) <= 42849672.95
+ ) or (
+ key in ['size',
+ 'precision_horz',
+ 'precision_vert'] and
+ not 0 <= float(value[key]) <= 90000000.00
+ )
+ ):
+ reasons.append('invalid value for {} "{}"'
+ .format(key, value[key]))
+ except KeyError:
+ reasons.append('missing {}'.format(key))
+ except ValueError:
+ reasons.append('invalid {} "{}"'
+ .format(key, value[key]))
+
+ for key in direction_keys:
+ try:
+ str(value[key])
+ if (
+ key == 'lat_direction' and
+ value[key] not in ['N', 'S']
+ ):
+ reasons.append('invalid direction for {} "{}"'
+ .format(key, value[key]))
+ if (
+ key == 'long_direction' and
+ value[key] not in ['E', 'W']
+ ):
+ reasons.append('invalid direction for {} "{}"'
+ .format(key, value[key]))
+ except KeyError:
+ reasons.append('missing {}'.format(key))
+ return reasons
+
+ @classmethod
+ def process(cls, values):
+ return [LocValue(v) for v in values]
+
+ def __init__(self, value):
+ self.lat_degrees = int(value['lat_degrees'])
+ self.lat_minutes = int(value['lat_minutes'])
+ self.lat_seconds = float(value['lat_seconds'])
+ self.lat_direction = value['lat_direction'].upper()
+ self.long_degrees = int(value['long_degrees'])
+ self.long_minutes = int(value['long_minutes'])
+ self.long_seconds = float(value['long_seconds'])
+ self.long_direction = value['long_direction'].upper()
+ self.altitude = float(value['altitude'])
+ self.size = float(value['size'])
+ self.precision_horz = float(value['precision_horz'])
+ self.precision_vert = float(value['precision_vert'])
+
+ @property
+ def data(self):
+ return {
+ 'lat_degrees': self.lat_degrees,
+ 'lat_minutes': self.lat_minutes,
+ 'lat_seconds': self.lat_seconds,
+ 'lat_direction': self.lat_direction,
+ 'long_degrees': self.long_degrees,
+ 'long_minutes': self.long_minutes,
+ 'long_seconds': self.long_seconds,
+ 'long_direction': self.long_direction,
+ 'altitude': self.altitude,
+ 'size': self.size,
+ 'precision_horz': self.precision_horz,
+ 'precision_vert': self.precision_vert,
+ }
+
+ def __hash__(self):
+ return hash((
+ self.lat_degrees,
+ self.lat_minutes,
+ self.lat_seconds,
+ self.lat_direction,
+ self.long_degrees,
+ self.long_minutes,
+ self.long_seconds,
+ self.long_direction,
+ self.altitude,
+ self.size,
+ self.precision_horz,
+ self.precision_vert,
+ ))
+
+ def _equality_tuple(self):
+ return (
+ self.lat_degrees,
+ self.lat_minutes,
+ self.lat_seconds,
+ self.lat_direction,
+ self.long_degrees,
+ self.long_minutes,
+ self.long_seconds,
+ self.long_direction,
+ self.altitude,
+ self.size,
+ self.precision_horz,
+ self.precision_vert,
+ )
+
+ def __repr__(self):
+ loc_format = "'{0} {1} {2:.3f} {3} " + \
+ "{4} {5} {6:.3f} {7} " + \
+ "{8:.2f}m {9:.2f}m {10:.2f}m {11:.2f}m'"
+ return loc_format.format(
+ self.lat_degrees,
+ self.lat_minutes,
+ self.lat_seconds,
+ self.lat_direction,
+ self.long_degrees,
+ self.long_minutes,
+ self.long_seconds,
+ self.long_direction,
+ self.altitude,
+ self.size,
+ self.precision_horz,
+ self.precision_vert,
+ )
+
+
+class LocRecord(_ValuesMixin, Record):
+ _type = 'LOC'
+ _value_type = LocValue
+
+
class MxValue(EqualityTupleMixin):
@classmethod
@@ -1016,13 +1261,37 @@ class NsRecord(_ValuesMixin, Record):
class PtrValue(_TargetValue):
- pass
+ @classmethod
+ def validate(cls, values, _type):
+ if not isinstance(values, list):
+ values = [values]
+
+ reasons = []
+
+ if not values:
+ reasons.append('missing values')
+
+ for value in values:
+ reasons.extend(super(PtrValue, cls).validate(value, _type))
-class PtrRecord(_ValueMixin, Record):
+ return reasons
+
+ @classmethod
+ def process(cls, values):
+ return [super(PtrValue, cls).process(v) for v in values]
+
+
+class PtrRecord(_ValuesMixin, Record):
_type = 'PTR'
_value_type = PtrValue
+ # This is for backward compatibility with providers that don't support
+ # multi-value PTR records.
+ @property
+ def value(self):
+ return self.values[0]
+
class SshfpValue(EqualityTupleMixin):
VALID_ALGORITHMS = (1, 2, 3, 4)
@@ -1227,3 +1496,86 @@ class _TxtValue(_ChunkedValue):
class TxtRecord(_ChunkedValuesMixin, Record):
_type = 'TXT'
_value_type = _TxtValue
+
+
+class UrlfwdValue(EqualityTupleMixin):
+ VALID_CODES = (301, 302)
+ VALID_MASKS = (0, 1, 2)
+ VALID_QUERY = (0, 1)
+
+ @classmethod
+ def validate(cls, data, _type):
+ if not isinstance(data, (list, tuple)):
+ data = (data,)
+ reasons = []
+ for value in data:
+ try:
+ code = int(value['code'])
+ if code not in cls.VALID_CODES:
+ reasons.append('unrecognized return code "{}"'
+ .format(code))
+ except KeyError:
+ reasons.append('missing code')
+ except ValueError:
+ reasons.append('invalid return code "{}"'
+ .format(value['code']))
+ try:
+ masking = int(value['masking'])
+ if masking not in cls.VALID_MASKS:
+ reasons.append('unrecognized masking setting "{}"'
+ .format(masking))
+ except KeyError:
+ reasons.append('missing masking')
+ except ValueError:
+ reasons.append('invalid masking setting "{}"'
+ .format(value['masking']))
+ try:
+ query = int(value['query'])
+ if query not in cls.VALID_QUERY:
+ reasons.append('unrecognized query setting "{}"'
+ .format(query))
+ except KeyError:
+ reasons.append('missing query')
+ except ValueError:
+ reasons.append('invalid query setting "{}"'
+ .format(value['query']))
+ for k in ('path', 'target'):
+ if k not in value:
+ reasons.append('missing {}'.format(k))
+ return reasons
+
+ @classmethod
+ def process(cls, values):
+ return [UrlfwdValue(v) for v in values]
+
+ def __init__(self, value):
+ self.path = value['path']
+ self.target = value['target']
+ self.code = int(value['code'])
+ self.masking = int(value['masking'])
+ self.query = int(value['query'])
+
+ @property
+ def data(self):
+ return {
+ 'path': self.path,
+ 'target': self.target,
+ 'code': self.code,
+ 'masking': self.masking,
+ 'query': self.query,
+ }
+
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def _equality_tuple(self):
+ return (self.path, self.target, self.code, self.masking, self.query)
+
+ def __repr__(self):
+ return '"{}" "{}" {} {} {}'.format(self.path, self.target, self.code,
+ self.masking, self.query)
+
+
+class UrlfwdRecord(_ValuesMixin, Record):
+ _type = 'URLFWD'
+ _value_type = UrlfwdValue
diff --git a/octodns/source/axfr.py b/octodns/source/axfr.py
index 70569d1..7a45155 100644
--- a/octodns/source/axfr.py
+++ b/octodns/source/axfr.py
@@ -26,8 +26,8 @@ class AxfrBaseSource(BaseSource):
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
- SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'PTR', 'SPF',
- 'SRV', 'TXT'))
+ SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'LOC', 'MX', 'NS', 'PTR',
+ 'SPF', 'SRV', 'TXT'))
def __init__(self, id):
super(AxfrBaseSource, self).__init__(id)
@@ -43,6 +43,48 @@ class AxfrBaseSource(BaseSource):
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
+ def _data_for_CAA(self, _type, records):
+ values = []
+ for record in records:
+ flags, tag, value = record['value'].split(' ', 2)
+ values.append({
+ 'flags': flags,
+ 'tag': tag,
+ 'value': value.replace('"', '')
+ })
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
+ def _data_for_LOC(self, _type, records):
+ values = []
+ for record in records:
+ lat_degrees, lat_minutes, lat_seconds, lat_direction, \
+ long_degrees, long_minutes, long_seconds, long_direction, \
+ altitude, size, precision_horz, precision_vert = \
+ record['value'].replace('m', '').split(' ', 11)
+ values.append({
+ 'lat_degrees': lat_degrees,
+ 'lat_minutes': lat_minutes,
+ 'lat_seconds': lat_seconds,
+ 'lat_direction': lat_direction,
+ 'long_degrees': long_degrees,
+ 'long_minutes': long_minutes,
+ 'long_seconds': long_seconds,
+ 'long_direction': long_direction,
+ 'altitude': altitude,
+ 'size': size,
+ 'precision_horz': precision_horz,
+ 'precision_vert': precision_vert,
+ })
+ return {
+ 'ttl': records[0]['ttl'],
+ 'type': _type,
+ 'values': values
+ }
+
def _data_for_MX(self, _type, records):
values = []
for record in records:
@@ -191,26 +233,34 @@ class ZoneFileSource(AxfrBaseSource):
class: octodns.source.axfr.ZoneFileSource
# The directory holding the zone files
# Filenames should match zone name (eg. example.com.)
+ # with optional extension specified with file_extension
directory: ./zonefiles
+ # File extension on zone files
+ # Appended to zone name to locate file
+ # (optional, default '.')
+ file_extension: zone
# Should sanity checks of the origin node be done
# (optional, default true)
check_origin: false
'''
- def __init__(self, id, directory, check_origin=True):
+ def __init__(self, id, directory, file_extension='.', check_origin=True):
self.log = logging.getLogger('ZoneFileSource[{}]'.format(id))
- self.log.debug('__init__: id=%s, directory=%s, check_origin=%s', id,
- directory, check_origin)
+ self.log.debug('__init__: id=%s, directory=%s, file_extension=%s, '
+ 'check_origin=%s', id,
+ directory, file_extension, check_origin)
super(ZoneFileSource, self).__init__(id)
self.directory = directory
+ self.file_extension = file_extension
self.check_origin = check_origin
self._zone_records = {}
def _load_zone_file(self, zone_name):
+ zone_filename = '{}{}'.format(zone_name[:-1], self.file_extension)
zonefiles = listdir(self.directory)
- if zone_name in zonefiles:
+ if zone_filename in zonefiles:
try:
- z = dns.zone.from_file(join(self.directory, zone_name),
+ z = dns.zone.from_file(join(self.directory, zone_filename),
zone_name, relativize=False,
check_origin=self.check_origin)
except DNSException as error:
diff --git a/octodns/source/base.py b/octodns/source/base.py
index 79b5a2a..6094726 100644
--- a/octodns/source/base.py
+++ b/octodns/source/base.py
@@ -8,6 +8,8 @@ from __future__ import absolute_import, division, print_function, \
class BaseSource(object):
+ SUPPORTS_MUTLIVALUE_PTR = False
+
def __init__(self, id):
self.id = id
if not getattr(self, 'log', False):
diff --git a/octodns/zone.py b/octodns/zone.py
index 5f099ac..dcc07c3 100644
--- a/octodns/zone.py
+++ b/octodns/zone.py
@@ -49,16 +49,26 @@ class Zone(object):
# optional trailing . b/c some sources don't have it on their fqdn
self._name_re = re.compile(r'\.?{}?$'.format(name))
+ # Copy-on-write semantics support, when `not None` this property will
+ # point to a location with records for this `Zone`. Once `hydrated`
+ # this property will be set to None
+ self._origin = None
+
self.log.debug('__init__: zone=%s, sub_zones=%s', self, sub_zones)
@property
def records(self):
+ if self._origin:
+ return self._origin.records
return set([r for _, node in self._records.items() for r in node])
def hostname_from_fqdn(self, fqdn):
return self._name_re.sub('', fqdn)
def add_record(self, record, replace=False, lenient=False):
+ if self._origin:
+ self.hydrate()
+
name = record.name
last = name.split('.')[-1]
@@ -94,10 +104,14 @@ class Zone(object):
node.add(record)
- def _remove_record(self, record):
- 'Only for use in tests'
+ def remove_record(self, record):
+ if self._origin:
+ self.hydrate()
self._records[record.name].discard(record)
+ # TODO: delete this
+ _remove_record = remove_record
+
def changes(self, desired, target):
self.log.debug('changes: zone=%s, target=%s', self, target)
@@ -184,5 +198,42 @@ class Zone(object):
return changes
+ def hydrate(self):
+ '''
+ Take a shallow copy Zone and make it a deeper copy holding its own
+ reference to records. These records will still be the originals and
+ they should not be modified. Changes should be made by calling
+ `add_record`, often with `replace=True`, and/or `remove_record`.
+
+ Note: This method does not need to be called under normal circumstances
+ as `add_record` and `remove_record` will automatically call it when
+ appropriate.
+ '''
+ origin = self._origin
+ if origin is None:
+ return False
+ # Need to clear this before the copy to prevent recursion
+ self._origin = None
+ for record in origin.records:
+ # Use lenient as we're copying origin and should take its records
+ # regardless
+ self.add_record(record, lenient=True)
+ return True
+
+ def copy(self):
+ '''
+ Copy-on-write semantics support. This method will create a shallow
+ clone of the zone which will be hydrated the first time `add_record` or
+ `remove_record` is called.
+
+ This allows low-cost copies of things to be made in situations where
+ changes are unlikely and only incurs the "expense" of actually
+ copying the records when required. The actual record copy will not be
+ "deep" meaning that records should not be modified directly.
+ '''
+ copy = Zone(self.name, self.sub_zones)
+ copy._origin = self
+ return copy
+
def __repr__(self):
return 'Zone<{}>'.format(self.name)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 485a33f..522f112 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -5,4 +5,4 @@ pycodestyle==2.6.0
pyflakes==2.2.0
readme_renderer[md]==26.0
requests_mock
-twine==1.15.0
+twine==3.2.0; python_version >= '3.2'
diff --git a/requirements.txt b/requirements.txt
index bc9a019..143ba67 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,15 @@
-PyYaml==5.3.1
-azure-common==1.1.25
-azure-mgmt-dns==3.0.0
+PyYaml==5.4
+azure-common==1.1.27
+azure-identity==1.5.0
+azure-mgmt-dns==8.0.0
+azure-mgmt-trafficmanager==0.51.0
boto3==1.15.9
botocore==1.18.9
dnspython==1.16.0
docutils==0.16
dyn==1.8.1
edgegrid-python==1.1.1
+fqdn==1.5.0
futures==3.2.0; python_version < '3.2'
google-cloud-core==1.4.1
google-cloud-dns==0.32.0
diff --git a/script/coverage b/script/coverage
index 32bdaea..db8e219 100755
--- a/script/coverage
+++ b/script/coverage
@@ -25,9 +25,13 @@ export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS=
+export ARM_CLIENT_ID=
+export ARM_CLIENT_SECRET=
+export ARM_TENANT_ID=
+export ARM_SUBSCRIPTION_ID=
# Don't allow disabling coverage
-grep -r -I --line-number "# pragma: nocover" octodns && {
+grep -r -I --line-number "# pragma: +no.*cover" octodns && {
echo "Code coverage should not be disabled"
exit 1
}
diff --git a/script/test b/script/test
index 41edfd8..98bae20 100755
--- a/script/test
+++ b/script/test
@@ -25,5 +25,9 @@ export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS=
+export ARM_CLIENT_ID=
+export ARM_CLIENT_SECRET=
+export ARM_TENANT_ID=
+export ARM_SUBSCRIPTION_ID=
nosetests "$@"
diff --git a/setup.py b/setup.py
index 9394e7f..0b15571 100644
--- a/setup.py
+++ b/setup.py
@@ -69,6 +69,7 @@ setup(
'PyYaml>=4.2b1',
'dnspython>=1.15.0',
'futures>=3.2.0; python_version<"3.2"',
+ 'fqdn>=1.5.0',
'ipaddress>=1.0.22; python_version<"3.3"',
'natsort>=5.5.0',
'pycountry>=19.8.18',
@@ -81,6 +82,6 @@ setup(
long_description_content_type='text/markdown',
name='octodns',
packages=find_packages(),
- url='https://github.com/github/octodns',
+ url='https://github.com/octodns/octodns',
version=octodns.__VERSION__,
)
diff --git a/tests/config/alias-zone-loop.yaml b/tests/config/alias-zone-loop.yaml
new file mode 100644
index 0000000..df8b53f
--- /dev/null
+++ b/tests/config/alias-zone-loop.yaml
@@ -0,0 +1,21 @@
+manager:
+ max_workers: 2
+providers:
+ in:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+zones:
+ unit.tests.:
+ sources:
+ - in
+ targets:
+ - dump
+
+ alias.tests.:
+ alias: unit.tests.
+
+ alias-loop.tests.:
+ alias: alias.tests.
diff --git a/tests/config/dynamic.tests.yaml b/tests/config/dynamic.tests.yaml
index 4bd97a7..d25f63a 100644
--- a/tests/config/dynamic.tests.yaml
+++ b/tests/config/dynamic.tests.yaml
@@ -109,6 +109,29 @@ cname:
- pool: iad
type: CNAME
value: target.unit.tests.
+pool-only-in-fallback:
+ dynamic:
+ pools:
+ one:
+ fallback: two
+ values:
+ - value: 1.1.1.1
+ three:
+ values:
+ - value: 3.3.3.3
+ two:
+ values:
+ - value: 2.2.2.2
+ rules:
+ - geos:
+ - NA-US
+ pool: one
+ - geos:
+ - AS-SG
+ pool: three
+ ttl: 300
+ type: A
+ values: [4.4.4.4]
real-ish-a:
dynamic:
pools:
diff --git a/tests/config/plan-output-filehandle.yaml b/tests/config/plan-output-filehandle.yaml
new file mode 100644
index 0000000..9c9bb87
--- /dev/null
+++ b/tests/config/plan-output-filehandle.yaml
@@ -0,0 +1,6 @@
+manager:
+ plan_outputs:
+ "doesntexist":
+ class: octodns.provider.plan.DoesntExist
+providers: {}
+zones: {}
diff --git a/tests/config/processors-missing-class.yaml b/tests/config/processors-missing-class.yaml
new file mode 100644
index 0000000..4594307
--- /dev/null
+++ b/tests/config/processors-missing-class.yaml
@@ -0,0 +1,23 @@
+providers:
+ config:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+ geo:
+ class: helpers.GeoProvider
+ nosshfp:
+ class: helpers.NoSshFpProvider
+
+processors:
+ no-class: {}
+
+zones:
+ unit.tests.:
+ processors:
+ - noop
+ sources:
+ - in
+ targets:
+ - dump
diff --git a/tests/config/processors-wants-config.yaml b/tests/config/processors-wants-config.yaml
new file mode 100644
index 0000000..53fc397
--- /dev/null
+++ b/tests/config/processors-wants-config.yaml
@@ -0,0 +1,25 @@
+providers:
+ config:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+ geo:
+ class: helpers.GeoProvider
+ nosshfp:
+ class: helpers.NoSshFpProvider
+
+processors:
+ # valid class, but it wants a param and we're not passing it
+ wants-config:
+ class: helpers.WantsConfigProcessor
+
+zones:
+ unit.tests.:
+ processors:
+ - noop
+ sources:
+ - in
+ targets:
+ - dump
diff --git a/tests/config/processors.yaml b/tests/config/processors.yaml
new file mode 100644
index 0000000..097024b
--- /dev/null
+++ b/tests/config/processors.yaml
@@ -0,0 +1,33 @@
+providers:
+ config:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+ geo:
+ class: helpers.GeoProvider
+ nosshfp:
+ class: helpers.NoSshFpProvider
+
+processors:
+ # Just testing config so any processor will do
+ noop:
+ class: octodns.processor.base.BaseProcessor
+
+zones:
+ unit.tests.:
+ processors:
+ - noop
+ sources:
+ - config
+ targets:
+ - dump
+
+ bad.unit.tests.:
+ processors:
+ - doesnt-exist
+ sources:
+ - in
+ targets:
+ - dump
diff --git a/tests/config/simple-alias-zone.yaml b/tests/config/simple-alias-zone.yaml
new file mode 100644
index 0000000..32154d5
--- /dev/null
+++ b/tests/config/simple-alias-zone.yaml
@@ -0,0 +1,19 @@
+manager:
+ max_workers: 2
+providers:
+ in:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+zones:
+ unit.tests.:
+ sources:
+ - in
+ targets:
+ - dump
+
+ alias.tests.:
+ alias: unit.tests.
+
diff --git a/tests/config/simple-split.yaml b/tests/config/simple-split.yaml
index d106506..a798258 100644
--- a/tests/config/simple-split.yaml
+++ b/tests/config/simple-split.yaml
@@ -4,14 +4,17 @@ providers:
in:
class: octodns.provider.yaml.SplitYamlProvider
directory: tests/config/split
+ extension: .tst
dump:
class: octodns.provider.yaml.SplitYamlProvider
directory: env/YAML_TMP_DIR
+ extension: .tst
# This is sort of ugly, but it shouldn't hurt anything. It'll just write out
# the target file twice where it and dump are both used
dump2:
class: octodns.provider.yaml.SplitYamlProvider
directory: env/YAML_TMP_DIR
+ extension: .tst
simple:
class: helpers.SimpleProvider
geo:
diff --git a/tests/config/split/dynamic.tests./a.yaml b/tests/config/split/dynamic.tests.tst/a.yaml
similarity index 100%
rename from tests/config/split/dynamic.tests./a.yaml
rename to tests/config/split/dynamic.tests.tst/a.yaml
diff --git a/tests/config/split/dynamic.tests./aaaa.yaml b/tests/config/split/dynamic.tests.tst/aaaa.yaml
similarity index 100%
rename from tests/config/split/dynamic.tests./aaaa.yaml
rename to tests/config/split/dynamic.tests.tst/aaaa.yaml
diff --git a/tests/config/split/dynamic.tests./cname.yaml b/tests/config/split/dynamic.tests.tst/cname.yaml
similarity index 100%
rename from tests/config/split/dynamic.tests./cname.yaml
rename to tests/config/split/dynamic.tests.tst/cname.yaml
diff --git a/tests/config/split/dynamic.tests./real-ish-a.yaml b/tests/config/split/dynamic.tests.tst/real-ish-a.yaml
similarity index 100%
rename from tests/config/split/dynamic.tests./real-ish-a.yaml
rename to tests/config/split/dynamic.tests.tst/real-ish-a.yaml
diff --git a/tests/config/split/dynamic.tests./simple-weighted.yaml b/tests/config/split/dynamic.tests.tst/simple-weighted.yaml
similarity index 100%
rename from tests/config/split/dynamic.tests./simple-weighted.yaml
rename to tests/config/split/dynamic.tests.tst/simple-weighted.yaml
diff --git a/tests/config/split/empty./.gitkeep b/tests/config/split/empty.tst/.gitkeep
similarity index 100%
rename from tests/config/split/empty./.gitkeep
rename to tests/config/split/empty.tst/.gitkeep
diff --git a/tests/config/split/subzone.unit.tests./12.yaml b/tests/config/split/subzone.unit.tests.tst/12.yaml
similarity index 100%
rename from tests/config/split/subzone.unit.tests./12.yaml
rename to tests/config/split/subzone.unit.tests.tst/12.yaml
diff --git a/tests/config/split/subzone.unit.tests./2.yaml b/tests/config/split/subzone.unit.tests.tst/2.yaml
similarity index 100%
rename from tests/config/split/subzone.unit.tests./2.yaml
rename to tests/config/split/subzone.unit.tests.tst/2.yaml
diff --git a/tests/config/split/subzone.unit.tests./test.yaml b/tests/config/split/subzone.unit.tests.tst/test.yaml
similarity index 100%
rename from tests/config/split/subzone.unit.tests./test.yaml
rename to tests/config/split/subzone.unit.tests.tst/test.yaml
diff --git a/tests/config/split/unit.tests./$unit.tests.yaml b/tests/config/split/unit.tests.tst/$unit.tests.yaml
similarity index 100%
rename from tests/config/split/unit.tests./$unit.tests.yaml
rename to tests/config/split/unit.tests.tst/$unit.tests.yaml
diff --git a/tests/config/split/unit.tests./_srv._tcp.yaml b/tests/config/split/unit.tests.tst/_srv._tcp.yaml
similarity index 100%
rename from tests/config/split/unit.tests./_srv._tcp.yaml
rename to tests/config/split/unit.tests.tst/_srv._tcp.yaml
diff --git a/tests/config/split/unit.tests./aaaa.yaml b/tests/config/split/unit.tests.tst/aaaa.yaml
similarity index 100%
rename from tests/config/split/unit.tests./aaaa.yaml
rename to tests/config/split/unit.tests.tst/aaaa.yaml
diff --git a/tests/config/split/unit.tests./cname.yaml b/tests/config/split/unit.tests.tst/cname.yaml
similarity index 100%
rename from tests/config/split/unit.tests./cname.yaml
rename to tests/config/split/unit.tests.tst/cname.yaml
diff --git a/tests/config/split/unit.tests.tst/dname.yaml b/tests/config/split/unit.tests.tst/dname.yaml
new file mode 100644
index 0000000..7cd1755
--- /dev/null
+++ b/tests/config/split/unit.tests.tst/dname.yaml
@@ -0,0 +1,5 @@
+---
+dname:
+ ttl: 300
+ type: DNAME
+ value: unit.tests.
diff --git a/tests/config/split/unit.tests./excluded.yaml b/tests/config/split/unit.tests.tst/excluded.yaml
similarity index 100%
rename from tests/config/split/unit.tests./excluded.yaml
rename to tests/config/split/unit.tests.tst/excluded.yaml
diff --git a/tests/config/split/unit.tests./ignored.yaml b/tests/config/split/unit.tests.tst/ignored.yaml
similarity index 100%
rename from tests/config/split/unit.tests./ignored.yaml
rename to tests/config/split/unit.tests.tst/ignored.yaml
diff --git a/tests/config/split/unit.tests./included.yaml b/tests/config/split/unit.tests.tst/included.yaml
similarity index 100%
rename from tests/config/split/unit.tests./included.yaml
rename to tests/config/split/unit.tests.tst/included.yaml
diff --git a/tests/config/split/unit.tests./mx.yaml b/tests/config/split/unit.tests.tst/mx.yaml
similarity index 100%
rename from tests/config/split/unit.tests./mx.yaml
rename to tests/config/split/unit.tests.tst/mx.yaml
diff --git a/tests/config/split/unit.tests./naptr.yaml b/tests/config/split/unit.tests.tst/naptr.yaml
similarity index 100%
rename from tests/config/split/unit.tests./naptr.yaml
rename to tests/config/split/unit.tests.tst/naptr.yaml
diff --git a/tests/config/split/unit.tests./ptr.yaml b/tests/config/split/unit.tests.tst/ptr.yaml
similarity index 56%
rename from tests/config/split/unit.tests./ptr.yaml
rename to tests/config/split/unit.tests.tst/ptr.yaml
index 0098b57..cffb50b 100644
--- a/tests/config/split/unit.tests./ptr.yaml
+++ b/tests/config/split/unit.tests.tst/ptr.yaml
@@ -2,4 +2,4 @@
ptr:
ttl: 300
type: PTR
- value: foo.bar.com.
+ values: [foo.bar.com.]
diff --git a/tests/config/split/unit.tests./spf.yaml b/tests/config/split/unit.tests.tst/spf.yaml
similarity index 100%
rename from tests/config/split/unit.tests./spf.yaml
rename to tests/config/split/unit.tests.tst/spf.yaml
diff --git a/tests/config/split/unit.tests./sub.yaml b/tests/config/split/unit.tests.tst/sub.yaml
similarity index 100%
rename from tests/config/split/unit.tests./sub.yaml
rename to tests/config/split/unit.tests.tst/sub.yaml
diff --git a/tests/config/split/unit.tests./txt.yaml b/tests/config/split/unit.tests.tst/txt.yaml
similarity index 100%
rename from tests/config/split/unit.tests./txt.yaml
rename to tests/config/split/unit.tests.tst/txt.yaml
diff --git a/tests/config/split/unit.tests.tst/urlfwd.yaml b/tests/config/split/unit.tests.tst/urlfwd.yaml
new file mode 100644
index 0000000..778b9b5
--- /dev/null
+++ b/tests/config/split/unit.tests.tst/urlfwd.yaml
@@ -0,0 +1,15 @@
+---
+urlfwd:
+ ttl: 300
+ type: URLFWD
+ values:
+ - code: 302
+ masking: 2
+ path: '/'
+ query: 0
+ target: 'http://www.unit.tests'
+ - code: 301
+ masking: 2
+ path: '/target'
+ query: 0
+ target: 'http://target.unit.tests'
diff --git a/tests/config/split/unit.tests./www.sub.yaml b/tests/config/split/unit.tests.tst/www.sub.yaml
similarity index 100%
rename from tests/config/split/unit.tests./www.sub.yaml
rename to tests/config/split/unit.tests.tst/www.sub.yaml
diff --git a/tests/config/split/unit.tests./www.yaml b/tests/config/split/unit.tests.tst/www.yaml
similarity index 100%
rename from tests/config/split/unit.tests./www.yaml
rename to tests/config/split/unit.tests.tst/www.yaml
diff --git a/tests/config/split/unordered./abc.yaml b/tests/config/split/unordered.tst/abc.yaml
similarity index 100%
rename from tests/config/split/unordered./abc.yaml
rename to tests/config/split/unordered.tst/abc.yaml
diff --git a/tests/config/split/unordered./xyz.yaml b/tests/config/split/unordered.tst/xyz.yaml
similarity index 100%
rename from tests/config/split/unordered./xyz.yaml
rename to tests/config/split/unordered.tst/xyz.yaml
diff --git a/tests/config/unit.tests.yaml b/tests/config/unit.tests.yaml
index 1da2465..aa28ee5 100644
--- a/tests/config/unit.tests.yaml
+++ b/tests/config/unit.tests.yaml
@@ -36,6 +36,22 @@
- flags: 0
tag: issue
value: ca.unit.tests
+_imap._tcp:
+ ttl: 600
+ type: SRV
+ values:
+ - port: 0
+ priority: 0
+ target: .
+ weight: 0
+_pop3._tcp:
+ ttl: 600
+ type: SRV
+ values:
+ - port: 0
+ priority: 0
+ target: .
+ weight: 0
_srv._tcp:
ttl: 600
type: SRV
@@ -56,6 +72,10 @@ cname:
ttl: 300
type: CNAME
value: unit.tests.
+dname:
+ ttl: 300
+ type: DNAME
+ value: unit.tests.
excluded:
octodns:
excluded:
@@ -73,6 +93,34 @@ included:
- test
type: CNAME
value: unit.tests.
+loc:
+ ttl: 300
+ type: LOC
+ values:
+ - altitude: 20
+ lat_degrees: 31
+ lat_direction: S
+ lat_minutes: 58
+ lat_seconds: 52.1
+ long_degrees: 115
+ long_direction: E
+ long_minutes: 49
+ long_seconds: 11.7
+ precision_horz: 10
+ precision_vert: 2
+ size: 10
+ - altitude: 20
+ lat_degrees: 53
+ lat_direction: N
+ lat_minutes: 13
+ lat_seconds: 10
+ long_degrees: 2
+ long_direction: W
+ long_minutes: 18
+ long_seconds: 26
+ precision_horz: 1000
+ precision_vert: 2
+ size: 10
mx:
ttl: 300
type: MX
@@ -104,7 +152,7 @@ naptr:
ptr:
ttl: 300
type: PTR
- value: foo.bar.com.
+ values: [foo.bar.com.]
spf:
ttl: 600
type: SPF
@@ -121,6 +169,20 @@ txt:
- Bah bah black sheep
- have you any wool.
- 'v=DKIM1\;k=rsa\;s=email\;h=sha256\;p=A/kinda+of/long/string+with+numb3rs'
+urlfwd:
+ ttl: 300
+ type: URLFWD
+ values:
+ - code: 302
+ masking: 2
+ path: '/'
+ query: 0
+ target: 'http://www.unit.tests'
+ - code: 301
+ masking: 2
+ path: '/target'
+ query: 0
+ target: 'http://target.unit.tests'
www:
ttl: 300
type: A
diff --git a/tests/config/unknown-processor.yaml b/tests/config/unknown-processor.yaml
new file mode 100644
index 0000000..4aff713
--- /dev/null
+++ b/tests/config/unknown-processor.yaml
@@ -0,0 +1,17 @@
+manager:
+ max_workers: 2
+providers:
+ in:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+zones:
+ unit.tests.:
+ sources:
+ - in
+ processors:
+ - missing
+ targets:
+ - dump
diff --git a/tests/config/unknown-source-zone.yaml b/tests/config/unknown-source-zone.yaml
new file mode 100644
index 0000000..a3940ff
--- /dev/null
+++ b/tests/config/unknown-source-zone.yaml
@@ -0,0 +1,18 @@
+manager:
+ max_workers: 2
+providers:
+ in:
+ class: octodns.provider.yaml.YamlProvider
+ directory: tests/config
+ dump:
+ class: octodns.provider.yaml.YamlProvider
+ directory: env/YAML_TMP_DIR
+zones:
+ unit.tests.:
+ sources:
+ - in
+ targets:
+ - dump
+
+ alias.tests.:
+ alias: does-not-exists.tests.
diff --git a/tests/fixtures/cloudflare-dns_records-page-2.json b/tests/fixtures/cloudflare-dns_records-page-2.json
index b0bbaef..366fe9c 100644
--- a/tests/fixtures/cloudflare-dns_records-page-2.json
+++ b/tests/fixtures/cloudflare-dns_records-page-2.json
@@ -177,15 +177,15 @@
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
- "name": "_srv._tcp.unit.tests",
+ "name": "_imap._tcp.unit.tests",
"data": {
- "service": "_srv",
+ "service": "_imap",
"proto": "_tcp",
"name": "unit.tests",
- "priority": 12,
- "weight": 20,
- "port": 30,
- "target": "foo-2.unit.tests"
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "target": "."
},
"proxiable": true,
"proxied": false,
@@ -202,15 +202,15 @@
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "SRV",
- "name": "_srv._tcp.unit.tests",
+ "name": "_pop3._tcp.unit.tests",
"data": {
- "service": "_srv",
- "proto": "_tcp",
+ "service": "_imap",
+ "proto": "_pop3",
"name": "unit.tests",
- "priority": 10,
- "weight": 20,
- "port": 30,
- "target": "foo-1.unit.tests"
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "target": "."
},
"proxiable": true,
"proxied": false,
@@ -227,10 +227,10 @@
],
"result_info": {
"page": 2,
- "per_page": 11,
- "total_pages": 2,
+ "per_page": 10,
+ "total_pages": 3,
"count": 10,
- "total_count": 20
+ "total_count": 24
},
"success": true,
"errors": [],
diff --git a/tests/fixtures/cloudflare-dns_records-page-3.json b/tests/fixtures/cloudflare-dns_records-page-3.json
new file mode 100644
index 0000000..0f06ab4
--- /dev/null
+++ b/tests/fixtures/cloudflare-dns_records-page-3.json
@@ -0,0 +1,128 @@
+{
+ "result": [
+ {
+ "id": "fc12ab34cd5611334422ab3322997656",
+ "type": "SRV",
+ "name": "_srv._tcp.unit.tests",
+ "data": {
+ "service": "_srv",
+ "proto": "_tcp",
+ "name": "unit.tests",
+ "priority": 12,
+ "weight": 20,
+ "port": 30,
+ "target": "foo-2.unit.tests"
+ },
+ "proxiable": true,
+ "proxied": false,
+ "ttl": 600,
+ "locked": false,
+ "zone_id": "ff12ab34cd5611334422ab3322997650",
+ "zone_name": "unit.tests",
+ "modified_on": "2017-03-11T18:01:43.940682Z",
+ "created_on": "2017-03-11T18:01:43.940682Z",
+ "meta": {
+ "auto_added": false
+ }
+ },
+ {
+ "id": "fc12ab34cd5611334422ab3322997656",
+ "type": "SRV",
+ "name": "_srv._tcp.unit.tests",
+ "data": {
+ "service": "_srv",
+ "proto": "_tcp",
+ "name": "unit.tests",
+ "priority": 10,
+ "weight": 20,
+ "port": 30,
+ "target": "foo-1.unit.tests"
+ },
+ "proxiable": true,
+ "proxied": false,
+ "ttl": 600,
+ "locked": false,
+ "zone_id": "ff12ab34cd5611334422ab3322997650",
+ "zone_name": "unit.tests",
+ "modified_on": "2017-03-11T18:01:43.940682Z",
+ "created_on": "2017-03-11T18:01:43.940682Z",
+ "meta": {
+ "auto_added": false
+ }
+ },
+ {
+ "id": "372e67954025e0ba6aaa6d586b9e0b59",
+ "type": "LOC",
+ "name": "loc.unit.tests",
+ "content": "IN LOC 31 58 52.1 S 115 49 11.7 E 20m 10m 10m 2m",
+ "proxiable": true,
+ "proxied": false,
+ "ttl": 300,
+ "locked": false,
+ "zone_id": "ff12ab34cd5611334422ab3322997650",
+ "zone_name": "unit.tests",
+ "created_on": "2020-01-28T05:20:00.12345Z",
+ "modified_on": "2020-01-28T05:20:00.12345Z",
+ "data": {
+ "lat_degrees": 31,
+ "lat_minutes": 58,
+ "lat_seconds": 52.1,
+ "lat_direction": "S",
+ "long_degrees": 115,
+ "long_minutes": 49,
+ "long_seconds": 11.7,
+ "long_direction": "E",
+ "altitude": 20,
+ "size": 10,
+ "precision_horz": 10,
+ "precision_vert": 2
+ },
+ "meta": {
+ "auto_added": true,
+ "source": "primary"
+ }
+ },
+ {
+ "id": "372e67954025e0ba6aaa6d586b9e0b59",
+ "type": "LOC",
+ "name": "loc.unit.tests",
+ "content": "IN LOC 53 14 10 N 2 18 26 W 20m 10m 1000m 2m",
+ "proxiable": true,
+ "proxied": false,
+ "ttl": 300,
+ "locked": false,
+ "zone_id": "ff12ab34cd5611334422ab3322997650",
+ "zone_name": "unit.tests",
+ "created_on": "2020-01-28T05:20:00.12345Z",
+ "modified_on": "2020-01-28T05:20:00.12345Z",
+ "data": {
+ "lat_degrees": 53,
+ "lat_minutes": 13,
+ "lat_seconds": 10,
+ "lat_direction": "N",
+ "long_degrees": 2,
+ "long_minutes": 18,
+ "long_seconds": 26,
+ "long_direction": "W",
+ "altitude": 20,
+ "size": 10,
+ "precision_horz": 1000,
+ "precision_vert": 2
+ },
+ "meta": {
+ "auto_added": true,
+ "source": "primary"
+ }
+ }
+ ],
+ "result_info": {
+ "page": 3,
+ "per_page": 10,
+ "total_pages": 3,
+ "count": 4,
+ "total_count": 24
+ },
+ "success": true,
+ "errors": [],
+ "messages": []
+}
diff --git a/tests/fixtures/cloudflare-pagerules.json b/tests/fixtures/cloudflare-pagerules.json
new file mode 100644
index 0000000..7efa018
--- /dev/null
+++ b/tests/fixtures/cloudflare-pagerules.json
@@ -0,0 +1,103 @@
+{
+ "result": [
+ {
+ "id": "2b1ec1793185213139f22059a165376e",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd0.unit.tests/"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "always_use_https"
+ }
+ ],
+ "priority": 4,
+ "status": "active",
+ "created_on": "2021-06-29T17:14:28.000000Z",
+ "modified_on": "2021-06-29T17:15:33.000000Z"
+ },
+ {
+ "id": "2b1ec1793185213139f22059a165376f",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd0.unit.tests/*"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://www.unit.tests/",
+ "status_code": 301
+ }
+ }
+ ],
+ "priority": 3,
+ "status": "active",
+ "created_on": "2021-06-29T17:07:12.000000Z",
+ "modified_on": "2021-06-29T17:15:12.000000Z"
+ },
+ {
+ "id": "2b1ec1793185213139f22059a165377e",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd1.unit.tests/*"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://www.unit.tests/",
+ "status_code": 302
+ }
+ }
+ ],
+ "priority": 2,
+ "status": "active",
+ "created_on": "2021-06-28T22:42:27.000000Z",
+ "modified_on": "2021-06-28T22:43:13.000000Z"
+ },
+ {
+ "id": "2a9140b17ffb0e6aed826049eec970b8",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd2.unit.tests/*"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://www.unit.tests/",
+ "status_code": 301
+ }
+ }
+ ],
+ "priority": 1,
+ "status": "active",
+ "created_on": "2021-06-25T20:10:50.000000Z",
+ "modified_on": "2021-06-28T22:38:10.000000Z"
+ }
+ ],
+ "success": true,
+ "errors": [],
+ "messages": []
+}
diff --git a/tests/fixtures/constellix-records.json b/tests/fixtures/constellix-records.json
index 545eada..f509fe7 100644
--- a/tests/fixtures/constellix-records.json
+++ b/tests/fixtures/constellix-records.json
@@ -64,6 +64,62 @@
"roundRobinFailover": [],
"pools": [],
"poolsDetail": []
+}, {
+ "id": 1898527,
+ "type": "SRV",
+ "recordType": "srv",
+ "name": "_imap._tcp",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149714387,
+ "value": [{
+ "value": ".",
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": ".",
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "disableFlag": false
+ }]
+}, {
+ "id": 1898528,
+ "type": "SRV",
+ "recordType": "srv",
+ "name": "_pop3._tcp",
+ "recordOption": "roundRobin",
+ "noAnswer": false,
+ "note": "",
+ "ttl": 600,
+ "gtdRegion": 1,
+ "parentId": 123123,
+ "parent": "domain",
+ "source": "Domain",
+ "modifiedTs": 1565149714387,
+ "value": [{
+ "value": ".",
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "disableFlag": false
+ }],
+ "roundRobin": [{
+ "value": ".",
+ "priority": 0,
+ "weight": 0,
+ "port": 0,
+ "disableFlag": false
+ }]
}, {
"id": 1808527,
"type": "SRV",
@@ -523,43 +579,6 @@
"roundRobinFailover": [],
"pools": [],
"poolsDetail": []
-}, {
- "id": 1808603,
- "type": "ANAME",
- "recordType": "aname",
- "name": "sub",
- "recordOption": "roundRobin",
- "noAnswer": false,
- "note": "",
- "ttl": 1800,
- "gtdRegion": 1,
- "parentId": 123123,
- "parent": "domain",
- "source": "Domain",
- "modifiedTs": 1565153387855,
- "value": [{
- "value": "aname.unit.tests.",
- "disableFlag": false
- }],
- "roundRobin": [{
- "value": "aname.unit.tests.",
- "disableFlag": false
- }],
- "geolocation": null,
- "recordFailover": {
- "disabled": false,
- "failoverType": 1,
- "failoverTypeStr": "Normal (always lowest level)",
- "values": []
- },
- "failover": {
- "disabled": false,
- "failoverType": 1,
- "failoverTypeStr": "Normal (always lowest level)",
- "values": []
- },
- "pools": [],
- "poolsDetail": []
}, {
"id": 1808520,
"type": "A",
diff --git a/tests/fixtures/digitalocean-page-2.json b/tests/fixtures/digitalocean-page-2.json
index 50f17f9..1405527 100644
--- a/tests/fixtures/digitalocean-page-2.json
+++ b/tests/fixtures/digitalocean-page-2.json
@@ -76,6 +76,28 @@
"weight": null,
"flags": null,
"tag": null
+ }, {
+ "id": 11189896,
+ "type": "SRV",
+ "name": "_imap._tcp",
+ "data": ".",
+ "priority": 0,
+ "port": 0,
+ "ttl": 600,
+ "weight": 0,
+ "flags": null,
+ "tag": null
+ }, {
+ "id": 11189897,
+ "type": "SRV",
+ "name": "_pop3._tcp",
+ "data": ".",
+ "priority": 0,
+ "port": 0,
+ "ttl": 600,
+ "weight": 0,
+ "flags": null,
+ "tag": null
}],
"links": {
"pages": {
diff --git a/tests/fixtures/dnsmadeeasy-records.json b/tests/fixtures/dnsmadeeasy-records.json
index 4d3ba64..aefd6ce 100644
--- a/tests/fixtures/dnsmadeeasy-records.json
+++ b/tests/fixtures/dnsmadeeasy-records.json
@@ -320,20 +320,6 @@
"name": "",
"value": "aname.unit.tests.",
"id": 11189895,
- "type": "ANAME"
- }, {
- "failover": false,
- "monitor": false,
- "sourceId": 123123,
- "dynamicDns": false,
- "failed": false,
- "gtdLocation": "DEFAULT",
- "hardLink": false,
- "ttl": 1800,
- "source": 1,
- "name": "sub",
- "value": "aname",
- "id": 11189896,
"type": "ANAME"
}, {
"failover": false,
diff --git a/tests/fixtures/easydns-records.json b/tests/fixtures/easydns-records.json
index c3718b5..73ea953 100644
--- a/tests/fixtures/easydns-records.json
+++ b/tests/fixtures/easydns-records.json
@@ -264,10 +264,32 @@
"rdata": "v=DKIM1;k=rsa;s=email;h=sha256;p=A\/kinda+of\/long\/string+with+numb3rs",
"geozone_id": "0",
"last_mod": "2020-01-01 01:01:01"
+ },
+ {
+ "id": "12340025",
+ "domain": "unit.tests",
+ "host": "_imap._tcp",
+ "ttl": "600",
+ "prio": "0",
+ "type": "SRV",
+ "rdata": "0 0 0 .",
+ "geozone_id": "0",
+ "last_mod": "2020-01-01 01:01:01"
+ },
+ {
+ "id": "12340026",
+ "domain": "unit.tests",
+ "host": "_pop3._tcp",
+ "ttl": "600",
+ "prio": "0",
+ "type": "SRV",
+ "rdata": "0 0 0 .",
+ "geozone_id": "0",
+ "last_mod": "2020-01-01 01:01:01"
}
],
- "count": 24,
- "total": 24,
+ "count": 26,
+ "total": 26,
"start": 0,
"max": 1000,
"status": 200
diff --git a/tests/fixtures/edgedns-records.json b/tests/fixtures/edgedns-records.json
index 4693eb1..a5ce14f 100644
--- a/tests/fixtures/edgedns-records.json
+++ b/tests/fixtures/edgedns-records.json
@@ -9,6 +9,22 @@
"name": "_srv._tcp.unit.tests",
"ttl": 600
},
+ {
+ "rdata": [
+ "0 0 0 ."
+ ],
+ "type": "SRV",
+ "name": "_imap._tcp.unit.tests",
+ "ttl": 600
+ },
+ {
+ "rdata": [
+ "0 0 0 ."
+ ],
+ "type": "SRV",
+ "name": "_pop3._tcp.unit.tests",
+ "ttl": 600
+ },
{
"rdata": [
"2601:644:500:e210:62f8:1dff:feb8:947a"
@@ -151,7 +167,7 @@
}
],
"metadata": {
- "totalElements": 16,
+ "totalElements": 18,
"showAll": true
}
-}
\ No newline at end of file
+}
diff --git a/tests/fixtures/gandi-no-changes.json b/tests/fixtures/gandi-no-changes.json
new file mode 100644
index 0000000..a67dc93
--- /dev/null
+++ b/tests/fixtures/gandi-no-changes.json
@@ -0,0 +1,154 @@
+[
+ {
+ "rrset_type": "A",
+ "rrset_ttl": 300,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/A",
+ "rrset_values": [
+ "1.2.3.4",
+ "1.2.3.5"
+ ]
+ },
+ {
+ "rrset_type": "CAA",
+ "rrset_ttl": 3600,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/CAA",
+ "rrset_values": [
+ "0 issue \"ca.unit.tests\""
+ ]
+ },
+ {
+ "rrset_type": "SSHFP",
+ "rrset_ttl": 3600,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/SSHFP",
+ "rrset_values": [
+ "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
+ "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73"
+ ]
+ },
+ {
+ "rrset_type": "AAAA",
+ "rrset_ttl": 600,
+ "rrset_name": "aaaa",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/aaaa/AAAA",
+ "rrset_values": [
+ "2601:644:500:e210:62f8:1dff:feb8:947a"
+ ]
+ },
+ {
+ "rrset_type": "CNAME",
+ "rrset_ttl": 300,
+ "rrset_name": "cname",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/cname/CNAME",
+ "rrset_values": [
+ "unit.tests."
+ ]
+ },
+ {
+ "rrset_type": "DNAME",
+ "rrset_ttl": 300,
+ "rrset_name": "dname",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/dname/DNAME",
+ "rrset_values": [
+ "unit.tests."
+ ]
+ },
+ {
+ "rrset_type": "CNAME",
+ "rrset_ttl": 3600,
+ "rrset_name": "excluded",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/excluded/CNAME",
+ "rrset_values": [
+ "unit.tests."
+ ]
+ },
+ {
+ "rrset_type": "MX",
+ "rrset_ttl": 300,
+ "rrset_name": "mx",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/mx/MX",
+ "rrset_values": [
+ "10 smtp-4.unit.tests.",
+ "20 smtp-2.unit.tests.",
+ "30 smtp-3.unit.tests.",
+ "40 smtp-1.unit.tests."
+ ]
+ },
+ {
+ "rrset_type": "PTR",
+ "rrset_ttl": 300,
+ "rrset_name": "ptr",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/ptr/PTR",
+ "rrset_values": [
+ "foo.bar.com."
+ ]
+ },
+ {
+ "rrset_type": "SPF",
+ "rrset_ttl": 600,
+ "rrset_name": "spf",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/spf/SPF",
+ "rrset_values": [
+ "\"v=spf1 ip4:192.168.0.1/16-all\""
+ ]
+ },
+ {
+ "rrset_type": "TXT",
+ "rrset_ttl": 600,
+ "rrset_name": "txt",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/txt/TXT",
+ "rrset_values": [
+ "\"Bah bah black sheep\"",
+ "\"have you any wool.\"",
+ "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
+ ]
+ },
+ {
+ "rrset_type": "A",
+ "rrset_ttl": 300,
+ "rrset_name": "www",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www/A",
+ "rrset_values": [
+ "2.2.3.6"
+ ]
+ },
+ {
+ "rrset_type": "A",
+ "rrset_ttl": 300,
+ "rrset_name": "www.sub",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www.sub/A",
+ "rrset_values": [
+ "2.2.3.6"
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 600,
+ "rrset_name": "_imap._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imap._tcp/SRV",
+ "rrset_values": [
+ "0 0 0 ."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 600,
+ "rrset_name": "_pop3._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3._tcp/SRV",
+ "rrset_values": [
+ "0 0 0 ."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 600,
+ "rrset_name": "_srv._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_srv._tcp/SRV",
+ "rrset_values": [
+ "10 20 30 foo-1.unit.tests.",
+ "12 20 30 foo-2.unit.tests."
+ ]
+ }
+ ]
diff --git a/tests/fixtures/gandi-records.json b/tests/fixtures/gandi-records.json
new file mode 100644
index 0000000..01d30f7
--- /dev/null
+++ b/tests/fixtures/gandi-records.json
@@ -0,0 +1,111 @@
+[
+ {
+ "rrset_type": "A",
+ "rrset_ttl": 10800,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/A",
+ "rrset_values": [
+ "217.70.184.38"
+ ]
+ },
+ {
+ "rrset_type": "MX",
+ "rrset_ttl": 10800,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/MX",
+ "rrset_values": [
+ "10 spool.mail.gandi.net.",
+ "50 fb.mail.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "TXT",
+ "rrset_ttl": 10800,
+ "rrset_name": "@",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/%40/TXT",
+ "rrset_values": [
+ "\"v=spf1 include:_mailcust.gandi.net ?all\""
+ ]
+ },
+ {
+ "rrset_type": "CNAME",
+ "rrset_ttl": 10800,
+ "rrset_name": "webmail",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/webmail/CNAME",
+ "rrset_values": [
+ "webmail.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "CNAME",
+ "rrset_ttl": 10800,
+ "rrset_name": "www",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/www/CNAME",
+ "rrset_values": [
+ "webredir.vip.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 10800,
+ "rrset_name": "_imap._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imap._tcp/SRV",
+ "rrset_values": [
+ "0 0 0 ."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 10800,
+ "rrset_name": "_imaps._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_imaps._tcp/SRV",
+ "rrset_values": [
+ "0 1 993 mail.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 10800,
+ "rrset_name": "_pop3._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3._tcp/SRV",
+ "rrset_values": [
+ "0 0 0 ."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 10800,
+ "rrset_name": "_pop3s._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_pop3s._tcp/SRV",
+ "rrset_values": [
+ "10 1 995 mail.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "SRV",
+ "rrset_ttl": 10800,
+ "rrset_name": "_submission._tcp",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/_submission._tcp/SRV",
+ "rrset_values": [
+ "0 1 465 mail.gandi.net."
+ ]
+ },
+ {
+ "rrset_type": "CDS",
+ "rrset_ttl": 10800,
+ "rrset_name": "sub",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/sub/CDS",
+ "rrset_values": [
+ "32128 13 1 6823D9BB1B03DF714DD0EB163E20B341C96D18C0"
+ ]
+ },
+ {
+ "rrset_type": "CNAME",
+ "rrset_ttl": 10800,
+ "rrset_name": "relative",
+ "rrset_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records/relative/CNAME",
+ "rrset_values": [
+ "target"
+ ]
+ }
+]
diff --git a/tests/fixtures/gandi-zone.json b/tests/fixtures/gandi-zone.json
new file mode 100644
index 0000000..e132f4c
--- /dev/null
+++ b/tests/fixtures/gandi-zone.json
@@ -0,0 +1,7 @@
+{
+ "domain_keys_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/keys",
+ "fqdn": "unit.tests",
+ "automatic_snapshots": true,
+ "domain_records_href": "https://api.gandi.net/v5/livedns/domains/unit.tests/records",
+ "domain_href": "https://api.gandi.net/v5/livedns/domains/unit.tests"
+}
\ No newline at end of file
diff --git a/tests/fixtures/gcore-no-changes.json b/tests/fixtures/gcore-no-changes.json
new file mode 100644
index 0000000..b1a3b25
--- /dev/null
+++ b/tests/fixtures/gcore-no-changes.json
@@ -0,0 +1,245 @@
+{
+ "rrsets": [
+ {
+ "name": "unit.tests",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "1.2.3.4"
+ ]
+ },
+ {
+ "content": [
+ "1.2.3.5"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "unit.tests",
+ "type": "NS",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "ns2.gcdn.services"
+ ]
+ },
+ {
+ "content": [
+ "ns1.gcorelabs.net"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_imap._tcp",
+ "type": "SRV",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ 0,
+ 0,
+ 0,
+ "."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_pop3._tcp",
+ "type": "SRV",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ 0,
+ 0,
+ 0,
+ "."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_srv._tcp",
+ "type": "SRV",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ 12,
+ 20,
+ 30,
+ "foo-2.unit.tests"
+ ]
+ },
+ {
+ "content": [
+ 10,
+ 20,
+ 30,
+ "foo-1.unit.tests"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "aaaa.unit.tests",
+ "type": "AAAA",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ "2601:644:500:e210:62f8:1dff:feb8:947a"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "cname.unit.tests",
+ "type": "CNAME",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "excluded.unit.tests",
+ "type": "CNAME",
+ "ttl": 3600,
+ "resource_records": [
+ {
+ "content": [
+ "unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "mx.unit.tests",
+ "type": "MX",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ 40,
+ "smtp-1.unit.tests."
+ ]
+ },
+ {
+ "content": [
+ 20,
+ "smtp-2.unit.tests."
+ ]
+ },
+ {
+ "content": [
+ 30,
+ "smtp-3.unit.tests."
+ ]
+ },
+ {
+ "content": [
+ 10,
+ "smtp-4.unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "ptr.unit.tests.",
+ "type": "PTR",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "foo.bar.com"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "sub.unit.tests",
+ "type": "NS",
+ "ttl": 3600,
+ "resource_records": [
+ {
+ "content": [
+ "6.2.3.4"
+ ]
+ },
+ {
+ "content": [
+ "7.2.3.4"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "txt.unit.tests",
+ "type": "TXT",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ "Bah bah black sheep"
+ ]
+ },
+ {
+ "content": [
+ "have you any wool."
+ ]
+ },
+ {
+ "content": [
+ "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "www.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "2.2.3.6"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "www.sub.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "2.2.3.6"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "spf.sub.unit.tests.",
+ "type": "SPF",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ "v=spf1 ip4:192.168.0.1/16-all"
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/fixtures/gcore-records.json b/tests/fixtures/gcore-records.json
new file mode 100644
index 0000000..9bf58d7
--- /dev/null
+++ b/tests/fixtures/gcore-records.json
@@ -0,0 +1,428 @@
+{
+ "rrsets": [
+ {
+ "name": "unit.tests",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "1.2.3.4"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "unit.tests",
+ "type": "NS",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "ns2.gcdn.services"
+ ]
+ },
+ {
+ "content": [
+ "ns1.gcorelabs.net"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_imap._tcp",
+ "type": "SRV",
+ "ttl": 1200,
+ "resource_records": [
+ {
+ "content": [
+ 0,
+ 0,
+ 0,
+ "."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_pop3._tcp",
+ "type": "SRV",
+ "ttl": 1200,
+ "resource_records": [
+ {
+ "content": [
+ 0,
+ 0,
+ 0,
+ "."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "_srv._tcp",
+ "type": "SRV",
+ "ttl": 1200,
+ "resource_records": [
+ {
+ "content": [
+ 12,
+ 20,
+ 30,
+ "foo-2.unit.tests."
+ ]
+ },
+ {
+ "content": [
+ 10,
+ 20,
+ 30,
+ "foo-1.unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "aaaa.unit.tests",
+ "type": "AAAA",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ "2601:644:500:e210:62f8:1dff:feb8:947a"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "cname.unit.tests",
+ "type": "CNAME",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "mx.unit.tests",
+ "type": "MX",
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ 40,
+ "smtp-1.unit.tests."
+ ]
+ },
+ {
+ "content": [
+ 20,
+ "smtp-2.unit.tests."
+ ]
+ }
+ ]
+ },
+ {
+ "name": "ptr.unit.tests.",
+ "type": "PTR",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "foo.bar.com"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "sub.unit.tests",
+ "type": "NS",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "6.2.3.4"
+ ]
+ },
+ {
+ "content": [
+ "7.2.3.4"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "txt.unit.tests",
+ "type": "TXT",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "\"Bah bah black sheep\""
+ ]
+ },
+ {
+ "content": [
+ "\"have you any wool.\""
+ ]
+ },
+ {
+ "content": [
+ "\"v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs\""
+ ]
+ }
+ ]
+ },
+ {
+ "name": "www.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "2.2.3.6"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "www.sub.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "resource_records": [
+ {
+ "content": [
+ "2.2.3.6"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "geo-A-single.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": [
+ {
+ "type": "geodns"
+ },
+ {
+ "limit": 1,
+ "strict": false,
+ "type": "default"
+ },
+ {
+ "limit": 1,
+ "type": "first_n"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "7.7.7.7"
+ ],
+ "meta": {
+ "countries": [
+ "RU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "8.8.8.8"
+ ],
+ "meta": {
+ "countries": [
+ "RU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "9.9.9.9"
+ ],
+ "meta": {
+ "continents": [
+ "EU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "10.10.10.10"
+ ],
+ "meta": {
+ "default": true
+ }
+ }
+ ]
+ },
+ {
+ "name": "geo-no-def.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": [
+ {
+ "type": "geodns"
+ },
+ {
+ "limit": 1,
+ "strict": false,
+ "type": "default"
+ },
+ {
+ "limit": 1,
+ "type": "first_n"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "7.7.7.7"
+ ],
+ "meta": {
+ "countries": [
+ "RU"
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "name": "geo-CNAME.unit.tests.",
+ "type": "CNAME",
+ "ttl": 300,
+ "filters": [
+ {
+ "type": "geodns"
+ },
+ {
+ "limit": 1,
+ "strict": false,
+ "type": "default"
+ },
+ {
+ "limit": 1,
+ "type": "first_n"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "ru-1.unit.tests"
+ ],
+ "meta": {
+ "countries": [
+ "RU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "ru-2.unit.tests"
+ ],
+ "meta": {
+ "countries": [
+ "RU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "eu.unit.tests"
+ ],
+ "meta": {
+ "continents": [
+ "EU"
+ ]
+ }
+ },
+ {
+ "content": [
+ "any.unit.tests."
+ ],
+ "meta": {
+ "default": true
+ }
+ }
+ ]
+ },
+ {
+ "name": "geo-ignore-len-filters.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": [
+ {
+ "limit": 1,
+ "type": "first_n"
+ },
+ {
+ "limit": 1,
+ "strict": false,
+ "type": "default"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "7.7.7.7"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "geo-ignore-types.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": [
+ {
+ "type": "geodistance"
+ },
+ {
+ "limit": 1,
+ "type": "first_n"
+ },
+ {
+ "limit": 1,
+ "strict": false,
+ "type": "default"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "7.7.7.7"
+ ]
+ }
+ ]
+ },
+ {
+ "name": "geo-ignore-limits.unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": [
+ {
+ "type": "geodns"
+ },
+ {
+ "limit": 2,
+ "strict": false,
+ "type": "default"
+ },
+ {
+ "limit": 1,
+ "type": "first_n"
+ }
+ ],
+ "resource_records": [
+ {
+ "content": [
+ "7.7.7.7"
+ ]
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/fixtures/gcore-zone.json b/tests/fixtures/gcore-zone.json
new file mode 100644
index 0000000..925af72
--- /dev/null
+++ b/tests/fixtures/gcore-zone.json
@@ -0,0 +1,27 @@
+{
+ "id": 27757,
+ "name": "unit.test",
+ "nx_ttl": 300,
+ "retry": 5400,
+ "refresh": 0,
+ "expiry": 1209600,
+ "contact": "support@gcorelabs.com",
+ "serial": 1614752868,
+ "primary_server": "ns1.gcorelabs.net",
+ "records": [
+ {
+ "id": 12419,
+ "name": "unit.test",
+ "type": "ns",
+ "ttl": 300,
+ "short_answers": [
+ "[ns2.gcdn.services]",
+ "[ns1.gcorelabs.net]"
+ ]
+ }
+ ],
+ "dns_servers": [
+ "ns1.gcorelabs.net",
+ "ns2.gcdn.services"
+ ]
+}
\ No newline at end of file
diff --git a/tests/fixtures/hetzner-records.json b/tests/fixtures/hetzner-records.json
new file mode 100644
index 0000000..bbafdcb
--- /dev/null
+++ b/tests/fixtures/hetzner-records.json
@@ -0,0 +1,223 @@
+{
+ "records": [
+ {
+ "id": "SOA",
+ "type": "SOA",
+ "name": "@",
+ "value": "hydrogen.ns.hetzner.com. dns.hetzner.com. 1 86400 10800 3600000 3600",
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "NS:sub:0",
+ "type": "NS",
+ "name": "sub",
+ "value": "6.2.3.4",
+ "ttl": 3600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "NS:sub:1",
+ "type": "NS",
+ "name": "sub",
+ "value": "7.2.3.4",
+ "ttl": 3600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "SRV:_srv._tcp:0",
+ "type": "SRV",
+ "name": "_srv._tcp",
+ "value": "10 20 30 foo-1.unit.tests",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "SRV:_srv._tcp:1",
+ "type": "SRV",
+ "name": "_srv._tcp",
+ "value": "12 20 30 foo-2.unit.tests",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "TXT:txt:0",
+ "type": "TXT",
+ "name": "txt",
+ "value": "\"Bah bah black sheep\"",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "TXT:txt:1",
+ "type": "TXT",
+ "name": "txt",
+ "value": "\"have you any wool.\"",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "A:@:0",
+ "type": "A",
+ "name": "@",
+ "value": "1.2.3.4",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "A:@:1",
+ "type": "A",
+ "name": "@",
+ "value": "1.2.3.5",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "A:www:0",
+ "type": "A",
+ "name": "www",
+ "value": "2.2.3.6",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "MX:mx:0",
+ "type": "MX",
+ "name": "mx",
+ "value": "10 smtp-4.unit.tests",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "MX:mx:1",
+ "type": "MX",
+ "name": "mx",
+ "value": "20 smtp-2.unit.tests",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "MX:mx:2",
+ "type": "MX",
+ "name": "mx",
+ "value": "30 smtp-3.unit.tests",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "MX:mx:3",
+ "type": "MX",
+ "name": "mx",
+ "value": "40 smtp-1.unit.tests",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "AAAA:aaaa:0",
+ "type": "AAAA",
+ "name": "aaaa",
+ "value": "2601:644:500:e210:62f8:1dff:feb8:947a",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "CNAME:cname:0",
+ "type": "CNAME",
+ "name": "cname",
+ "value": "unit.tests",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "A:www.sub:0",
+ "type": "A",
+ "name": "www.sub",
+ "value": "2.2.3.6",
+ "ttl": 300,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "TXT:txt:2",
+ "type": "TXT",
+ "name": "txt",
+ "value": "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "CAA:@:0",
+ "type": "CAA",
+ "name": "@",
+ "value": "0 issue \"ca.unit.tests\"",
+ "ttl": 3600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "CNAME:included:0",
+ "type": "CNAME",
+ "name": "included",
+ "value": "unit.tests",
+ "ttl": 3600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "SRV:_imap._tcp:0",
+ "type": "SRV",
+ "name": "_imap._tcp",
+ "value": "0 0 0 .",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ },
+ {
+ "id": "SRV:_pop3._tcp:0",
+ "type": "SRV",
+ "name": "_pop3._tcp",
+ "value": "0 0 0 .",
+ "ttl": 600,
+ "zone_id": "unit.tests",
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC"
+ }
+ ]
+}
diff --git a/tests/fixtures/hetzner-zones.json b/tests/fixtures/hetzner-zones.json
new file mode 100644
index 0000000..4d9b897
--- /dev/null
+++ b/tests/fixtures/hetzner-zones.json
@@ -0,0 +1,43 @@
+{
+ "zones": [
+ {
+ "id": "unit.tests",
+ "name": "unit.tests",
+ "ttl": 3600,
+ "registrar": "",
+ "legacy_dns_host": "",
+ "legacy_ns": [],
+ "ns": [],
+ "created": "0000-00-00 00:00:00.000 +0000 UTC",
+ "verified": "",
+ "modified": "0000-00-00 00:00:00.000 +0000 UTC",
+ "project": "",
+ "owner": "",
+ "permission": "",
+ "zone_type": {
+ "id": "",
+ "name": "",
+ "description": "",
+ "prices": null
+ },
+ "status": "verified",
+ "paused": false,
+ "is_secondary_dns": false,
+ "txt_verification": {
+ "name": "",
+ "token": ""
+ },
+ "records_count": null
+ }
+ ],
+ "meta": {
+ "pagination": {
+ "page": 1,
+ "per_page": 100,
+ "previous_page": 1,
+ "next_page": 1,
+ "last_page": 1,
+ "total_entries": 1
+ }
+ }
+}
diff --git a/tests/fixtures/mythicbeasts-list.txt b/tests/fixtures/mythicbeasts-list.txt
index ed4ea4c..006a8ff 100644
--- a/tests/fixtures/mythicbeasts-list.txt
+++ b/tests/fixtures/mythicbeasts-list.txt
@@ -5,6 +5,8 @@
@ 3600 SSHFP 1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73
@ 3600 SSHFP 1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49
@ 3600 CAA 0 issue ca.unit.tests
+_imap._tcp 600 SRV 0 0 0 .
+_pop3._tcp 600 SRV 0 0 0 .
_srv._tcp 600 SRV 10 20 30 foo-1.unit.tests.
_srv._tcp 600 SRV 12 20 30 foo-2.unit.tests.
aaaa 600 AAAA 2601:644:500:e210:62f8:1dff:feb8:947a
diff --git a/tests/fixtures/powerdns-full-data.json b/tests/fixtures/powerdns-full-data.json
index 3d445d4..8feda7e 100644
--- a/tests/fixtures/powerdns-full-data.json
+++ b/tests/fixtures/powerdns-full-data.json
@@ -32,6 +32,22 @@
"ttl": 300,
"type": "MX"
},
+ {
+ "comments": [],
+ "name": "loc.unit.tests.",
+ "records": [
+ {
+ "content": "31 58 52.100 S 115 49 11.700 E 20.00m 10.00m 10.00m 2.00m",
+ "disabled": false
+ },
+ {
+ "content": "53 13 10.000 N 2 18 26.000 W 20.00m 10.00m 1000.00m 2.00m",
+ "disabled": false
+ }
+ ],
+ "ttl": 300,
+ "type": "LOC"
+ },
{
"comments": [],
"name": "sub.unit.tests.",
@@ -59,6 +75,30 @@
"ttl": 300,
"type": "A"
},
+ {
+ "comments": [],
+ "name": "_imap._tcp.unit.tests.",
+ "records": [
+ {
+ "content": "0 0 0 .",
+ "disabled": false
+ }
+ ],
+ "ttl": 600,
+ "type": "SRV"
+ },
+ {
+ "comments": [],
+ "name": "_pop3._tcp.unit.tests.",
+ "records": [
+ {
+ "content": "0 0 0 .",
+ "disabled": false
+ }
+ ],
+ "ttl": 600,
+ "type": "SRV"
+ },
{
"comments": [],
"name": "_srv._tcp.unit.tests.",
diff --git a/tests/fixtures/ultra-records-page-1.json b/tests/fixtures/ultra-records-page-1.json
index 2f5f836..8614427 100644
--- a/tests/fixtures/ultra-records-page-1.json
+++ b/tests/fixtures/ultra-records-page-1.json
@@ -87,7 +87,7 @@
}
],
"resultInfo": {
- "totalCount": 12,
+ "totalCount": 13,
"offset": 0,
"returnedCount": 10
}
diff --git a/tests/fixtures/ultra-records-page-2.json b/tests/fixtures/ultra-records-page-2.json
index db51828..274d95e 100644
--- a/tests/fixtures/ultra-records-page-2.json
+++ b/tests/fixtures/ultra-records-page-2.json
@@ -24,11 +24,28 @@
"order": "FIXED",
"description": "octodns1.test."
}
+ },
+ {
+ "ownerName": "octodns1.test.",
+ "rrtype": "APEXALIAS (65282)",
+ "ttl": 3600,
+ "rdata": [
+ "www.octodns1.test."
+ ]
+ },
+ {
+ "ownerName": "host1.octodns1.test.",
+ "rrtype": "RRSET (70)",
+ "ttl": 3600,
+ "rdata": [
+ "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855"
+ ]
}
+
],
"resultInfo": {
- "totalCount": 12,
+ "totalCount": 13,
"offset": 10,
- "returnedCount": 2
+ "returnedCount": 3
}
}
\ No newline at end of file
diff --git a/tests/fixtures/ultra-zones-page-1.json b/tests/fixtures/ultra-zones-page-1.json
index ad98d48..f748d08 100644
--- a/tests/fixtures/ultra-zones-page-1.json
+++ b/tests/fixtures/ultra-zones-page-1.json
@@ -19,7 +19,7 @@
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
- "resourceRecordCount": 5,
+ "resourceRecordCount": 6,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
diff --git a/tests/helpers.py b/tests/helpers.py
index ff7f7cc..eedfd8b 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -7,6 +7,10 @@ from __future__ import absolute_import, division, print_function, \
from shutil import rmtree
from tempfile import mkdtemp
+from logging import getLogger
+
+from octodns.processor.base import BaseProcessor
+from octodns.provider.base import BaseProvider
class SimpleSource(object):
@@ -90,3 +94,29 @@ class TemporaryDirectory(object):
rmtree(self.dirname)
else:
raise Exception(self.dirname)
+
+
+class WantsConfigProcessor(BaseProcessor):
+
+ def __init__(self, name, some_config):
+ super(WantsConfigProcessor, self).__init__(name)
+
+
+class PlannableProvider(BaseProvider):
+ log = getLogger('PlannableProvider')
+
+ SUPPORTS_GEO = False
+ SUPPORTS_DYNAMIC = False
+ SUPPORTS = set(('A',))
+
+ def __init__(self, *args, **kwargs):
+ super(PlannableProvider, self).__init__(*args, **kwargs)
+
+ def populate(self, zone, source=False, target=False, lenient=False):
+ pass
+
+ def supports(self, record):
+ return True
+
+ def __repr__(self):
+ return self.__class__.__name__
diff --git a/tests/test_octodns_manager.py b/tests/test_octodns_manager.py
index 9956790..c9362d4 100644
--- a/tests/test_octodns_manager.py
+++ b/tests/test_octodns_manager.py
@@ -8,16 +8,19 @@ from __future__ import absolute_import, division, print_function, \
from os import environ
from os.path import dirname, join
from six import text_type
-from unittest import TestCase
-from octodns.record import Record
from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager, \
ManagerException
+from octodns.processor.base import BaseProcessor
+from octodns.record import Create, Delete, Record
from octodns.yaml import safe_load
from octodns.zone import Zone
+from mock import MagicMock, patch
+from unittest import TestCase
+
from helpers import DynamicProvider, GeoProvider, NoSshFpProvider, \
- SimpleProvider, TemporaryDirectory
+ PlannableProvider, SimpleProvider, TemporaryDirectory
config_dir = join(dirname(__file__), 'config')
@@ -118,12 +121,12 @@ class TestManager(TestCase):
environ['YAML_TMP_DIR'] = tmpdir.dirname
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False)
- self.assertEquals(21, tc)
+ self.assertEquals(26, tc)
# try with just one of the zones
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, eligible_zones=['unit.tests.'])
- self.assertEquals(15, tc)
+ self.assertEquals(20, tc)
# the subzone, with 2 targets
tc = Manager(get_config_filename('simple.yaml')) \
@@ -138,18 +141,18 @@ class TestManager(TestCase):
# Again with force
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, force=True)
- self.assertEquals(21, tc)
+ self.assertEquals(26, tc)
# Again with max_workers = 1
tc = Manager(get_config_filename('simple.yaml'), max_workers=1) \
.sync(dry_run=False, force=True)
- self.assertEquals(21, tc)
+ self.assertEquals(26, tc)
# Include meta
tc = Manager(get_config_filename('simple.yaml'), max_workers=1,
include_meta=True) \
.sync(dry_run=False, force=True)
- self.assertEquals(25, tc)
+ self.assertEquals(30, tc)
def test_eligible_sources(self):
with TemporaryDirectory() as tmpdir:
@@ -167,6 +170,38 @@ class TestManager(TestCase):
.sync(eligible_targets=['foo'])
self.assertEquals(0, tc)
+ def test_aliases(self):
+ with TemporaryDirectory() as tmpdir:
+ environ['YAML_TMP_DIR'] = tmpdir.dirname
+ # Alias zones with a valid target.
+ tc = Manager(get_config_filename('simple-alias-zone.yaml')) \
+ .sync()
+ self.assertEquals(0, tc)
+
+ # Alias zone with an invalid target.
+ with self.assertRaises(ManagerException) as ctx:
+ tc = Manager(get_config_filename('unknown-source-zone.yaml')) \
+ .sync()
+ self.assertEquals('Invalid alias zone alias.tests.: source zone '
+ 'does-not-exists.tests. does not exist',
+ text_type(ctx.exception))
+
+ # Alias zone that points to another alias zone.
+ with self.assertRaises(ManagerException) as ctx:
+ tc = Manager(get_config_filename('alias-zone-loop.yaml')) \
+ .sync()
+ self.assertEquals('Invalid alias zone alias-loop.tests.: source '
+ 'zone alias.tests. is an alias zone',
+ text_type(ctx.exception))
+
+ # Sync an alias without the zone it refers to
+ with self.assertRaises(ManagerException) as ctx:
+ tc = Manager(get_config_filename('simple-alias-zone.yaml')) \
+ .sync(eligible_zones=["alias.tests."])
+ self.assertEquals('Zone alias.tests. cannot be sync without zone '
+ 'unit.tests. sinced it is aliased',
+ text_type(ctx.exception))
+
def test_compare(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
@@ -183,13 +218,13 @@ class TestManager(TestCase):
fh.write('---\n{}')
changes = manager.compare(['in'], ['dump'], 'unit.tests.')
- self.assertEquals(15, len(changes))
+ self.assertEquals(20, len(changes))
# Compound sources with varying support
changes = manager.compare(['in', 'nosshfp'],
['dump'],
'unit.tests.')
- self.assertEquals(14, len(changes))
+ self.assertEquals(19, len(changes))
with self.assertRaises(ManagerException) as ctx:
manager.compare(['nope'], ['dump'], 'unit.tests.')
@@ -286,6 +321,41 @@ class TestManager(TestCase):
.validate_configs()
self.assertTrue('unknown source' in text_type(ctx.exception))
+ # Alias zone using an invalid source zone.
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('unknown-source-zone.yaml')) \
+ .validate_configs()
+ self.assertTrue('does not exist' in
+ text_type(ctx.exception))
+
+ # Alias zone that points to another alias zone.
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('alias-zone-loop.yaml')) \
+ .validate_configs()
+ self.assertTrue('is an alias zone' in
+ text_type(ctx.exception))
+
+ # Valid config file using an alias zone.
+ Manager(get_config_filename('simple-alias-zone.yaml')) \
+ .validate_configs()
+
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('unknown-processor.yaml')) \
+ .validate_configs()
+ self.assertTrue('unknown processor' in text_type(ctx.exception))
+
+ def test_get_zone(self):
+ Manager(get_config_filename('simple.yaml')).get_zone('unit.tests.')
+
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('simple.yaml')).get_zone('unit.tests')
+ self.assertTrue('missing ending dot' in text_type(ctx.exception))
+
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('simple.yaml')) \
+ .get_zone('unknown-zone.tests.')
+ self.assertTrue('Unknown zone name' in text_type(ctx.exception))
+
def test_populate_lenient_fallback(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
@@ -294,20 +364,152 @@ class TestManager(TestCase):
class NoLenient(SimpleProvider):
- def populate(self, zone, source=False):
+ def populate(self, zone):
pass
# This should be ok, we'll fall back to not passing it
- manager._populate_and_plan('unit.tests.', [NoLenient()], [])
+ manager._populate_and_plan('unit.tests.', [], [NoLenient()], [])
+
+ class OtherType(SimpleProvider):
+
+ def populate(self, zone, lenient=False):
+ raise TypeError('something else')
- class NoZone(SimpleProvider):
+ # This will blow up, we don't fallback for source
+ with self.assertRaises(TypeError) as ctx:
+ manager._populate_and_plan('unit.tests.', [], [OtherType()],
+ [])
+ self.assertEquals('something else', text_type(ctx.exception))
- def populate(self, lenient=False):
+ def test_plan_processors_fallback(self):
+ with TemporaryDirectory() as tmpdir:
+ environ['YAML_TMP_DIR'] = tmpdir.dirname
+ # Only allow a target that doesn't exist
+ manager = Manager(get_config_filename('simple.yaml'))
+
+ class NoProcessors(SimpleProvider):
+
+ def plan(self, zone):
pass
+ # This should be ok, we'll fall back to not passing it
+ manager._populate_and_plan('unit.tests.', [], [],
+ [NoProcessors()])
+
+ class OtherType(SimpleProvider):
+
+ def plan(self, zone, processors):
+ raise TypeError('something else')
+
# This will blow up, we don't fallback for source
- with self.assertRaises(TypeError):
- manager._populate_and_plan('unit.tests.', [NoZone()], [])
+ with self.assertRaises(TypeError) as ctx:
+ manager._populate_and_plan('unit.tests.', [], [],
+ [OtherType()])
+ self.assertEquals('something else', text_type(ctx.exception))
+
+ @patch('octodns.manager.Manager._get_named_class')
+ def test_sync_passes_file_handle(self, mock):
+ plan_output_mock = MagicMock()
+ plan_output_class_mock = MagicMock()
+ plan_output_class_mock.return_value = plan_output_mock
+ mock.return_value = plan_output_class_mock
+ fh_mock = MagicMock()
+
+ Manager(get_config_filename('plan-output-filehandle.yaml')
+ ).sync(plan_output_fh=fh_mock)
+
+ # Since we only care about the fh kwarg, and different _PlanOutputs are
+ # free to require arbitrary kwargs anyway, we concern ourselves
+ # with checking the value of fh only.
+ plan_output_mock.run.assert_called()
+ _, kwargs = plan_output_mock.run.call_args
+ self.assertEqual(fh_mock, kwargs.get('fh'))
+
+ def test_processor_config(self):
+ # Smoke test loading a valid config
+ manager = Manager(get_config_filename('processors.yaml'))
+ self.assertEquals(['noop'], list(manager.processors.keys()))
+ # This zone specifies a valid processor
+ manager.sync(['unit.tests.'])
+
+ with self.assertRaises(ManagerException) as ctx:
+ # This zone specifies a non-existent processor
+ manager.sync(['bad.unit.tests.'])
+ self.assertTrue('Zone bad.unit.tests., unknown processor: '
+ 'doesnt-exist' in text_type(ctx.exception))
+
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('processors-missing-class.yaml'))
+ self.assertTrue('Processor no-class is missing class' in
+ text_type(ctx.exception))
+
+ with self.assertRaises(ManagerException) as ctx:
+ Manager(get_config_filename('processors-wants-config.yaml'))
+ self.assertTrue('Incorrect processor config for wants-config' in
+ text_type(ctx.exception))
+
+ def test_processors(self):
+ manager = Manager(get_config_filename('simple.yaml'))
+
+ targets = [PlannableProvider('prov')]
+
+ zone = Zone('unit.tests.', [])
+ record = Record.new(zone, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+
+ # muck with sources
+ class MockProcessor(BaseProcessor):
+
+ def process_source_zone(self, zone, sources):
+ zone = zone.copy()
+ zone.add_record(record)
+ return zone
+
+ mock = MockProcessor('mock')
+ plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
+ targets)
+ # Our mock was called and added the record
+ self.assertEquals(record, list(zone.records)[0])
+ # We got a create for the thing added to the expected state (source)
+ self.assertIsInstance(plans[0][1].changes[0], Create)
+
+ # muck with targets
+ class MockProcessor(BaseProcessor):
+
+ def process_target_zone(self, zone, target):
+ zone = zone.copy()
+ zone.add_record(record)
+ return zone
+
+ mock = MockProcessor('mock')
+ plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
+ targets)
+ # No record added since it's target this time
+ self.assertFalse(zone.records)
+ # We got a delete for the thing added to the existing state (target)
+ self.assertIsInstance(plans[0][1].changes[0], Delete)
+
+ # muck with plans
+ class MockProcessor(BaseProcessor):
+
+ def process_target_zone(self, zone, target):
+ zone = zone.copy()
+ zone.add_record(record)
+ return zone
+
+ def process_plan(self, plans, sources, target):
+ # get rid of the change
+ plans.changes.pop(0)
+
+ mock = MockProcessor('mock')
+ plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
+ targets)
+ # We planned a delete again, but this time removed it from the plan, so
+ # no plans
+ self.assertFalse(plans)
class TestMainThreadExecutor(TestCase):
diff --git a/tests/test_octodns_processor_acme.py b/tests/test_octodns_processor_acme.py
new file mode 100644
index 0000000..02177f7
--- /dev/null
+++ b/tests/test_octodns_processor_acme.py
@@ -0,0 +1,102 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from unittest import TestCase
+
+from octodns.processor.acme import AcmeMangingProcessor
+from octodns.record import Record
+from octodns.zone import Zone
+
+zone = Zone('unit.tests.', [])
+records = {
+ 'root-unowned': Record.new(zone, '_acme-challenge', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'magic bit',
+ }),
+ 'sub-unowned': Record.new(zone, '_acme-challenge.sub-unowned', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'magic bit',
+ }),
+ 'not-txt': Record.new(zone, '_acme-challenge.not-txt', {
+ 'ttl': 30,
+ 'type': 'AAAA',
+ 'value': '::1',
+ }),
+ 'not-acme': Record.new(zone, 'not-acme', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'Hello World!',
+ }),
+ 'managed': Record.new(zone, '_acme-challenge.managed', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'magic bit',
+ }),
+ 'owned': Record.new(zone, '_acme-challenge.owned', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'values': ['*octoDNS*', 'magic bit'],
+ }),
+ 'going-away': Record.new(zone, '_acme-challenge.going-away', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'values': ['*octoDNS*', 'magic bit'],
+ }),
+}
+
+
+class TestAcmeMangingProcessor(TestCase):
+
+ def test_process_zones(self):
+ acme = AcmeMangingProcessor('acme')
+
+ source = Zone(zone.name, [])
+ # Unrelated stuff that should be untouched
+ source.add_record(records['not-txt'])
+ source.add_record(records['not-acme'])
+ # A managed acme that will have ownership value added
+ source.add_record(records['managed'])
+
+ got = acme.process_source_zone(source)
+ self.assertEquals([
+ '_acme-challenge.managed',
+ '_acme-challenge.not-txt',
+ 'not-acme',
+ ], sorted([r.name for r in got.records]))
+ managed = None
+ for record in got.records:
+ if record.name.endswith('managed'):
+ managed = record
+ break
+ self.assertTrue(managed)
+ # Ownership was marked with an extra value
+ self.assertEquals(['*octoDNS*', 'magic bit'], record.values)
+
+ existing = Zone(zone.name, [])
+ # Unrelated stuff that should be untouched
+ existing.add_record(records['not-txt'])
+ existing.add_record(records['not-acme'])
+ # Stuff that will be ignored
+ existing.add_record(records['root-unowned'])
+ existing.add_record(records['sub-unowned'])
+ # A managed acme that needs ownership value added
+ existing.add_record(records['managed'])
+ # A managed acme that has ownership managed
+ existing.add_record(records['owned'])
+ # A managed acme that needs to go away
+ existing.add_record(records['going-away'])
+
+ got = acme.process_target_zone(existing)
+ self.assertEquals([
+ '_acme-challenge.going-away',
+ '_acme-challenge.managed',
+ '_acme-challenge.not-txt',
+ '_acme-challenge.owned',
+ 'not-acme'
+ ], sorted([r.name for r in got.records]))
diff --git a/tests/test_octodns_processor_filter.py b/tests/test_octodns_processor_filter.py
new file mode 100644
index 0000000..176f7d1
--- /dev/null
+++ b/tests/test_octodns_processor_filter.py
@@ -0,0 +1,90 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from unittest import TestCase
+
+from octodns.processor.filter import TypeAllowlistFilter, TypeRejectlistFilter
+from octodns.record import Record
+from octodns.zone import Zone
+
+zone = Zone('unit.tests.', [])
+for record in [
+ Record.new(zone, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ }),
+ Record.new(zone, 'aaaa', {
+ 'ttl': 30,
+ 'type': 'AAAA',
+ 'value': '::1',
+ }),
+ Record.new(zone, 'txt', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'Hello World!',
+ }),
+ Record.new(zone, 'a2', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '2.3.4.5',
+ }),
+ Record.new(zone, 'txt2', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'That will do',
+ }),
+]:
+ zone.add_record(record)
+
+
+class TestTypeAllowListFilter(TestCase):
+
+ def test_basics(self):
+ filter_a = TypeAllowlistFilter('only-a', set(('A')))
+
+ got = filter_a.process_source_zone(zone.copy())
+ self.assertEquals(['a', 'a2'], sorted([r.name for r in got.records]))
+
+ filter_aaaa = TypeAllowlistFilter('only-aaaa', ('AAAA',))
+ got = filter_aaaa.process_source_zone(zone.copy())
+ self.assertEquals(['aaaa'], sorted([r.name for r in got.records]))
+
+ filter_txt = TypeAllowlistFilter('only-txt', ['TXT'])
+ got = filter_txt.process_target_zone(zone.copy())
+ self.assertEquals(['txt', 'txt2'],
+ sorted([r.name for r in got.records]))
+
+ filter_a_aaaa = TypeAllowlistFilter('only-aaaa', set(('A', 'AAAA')))
+ got = filter_a_aaaa.process_target_zone(zone.copy())
+ self.assertEquals(['a', 'a2', 'aaaa'],
+ sorted([r.name for r in got.records]))
+
+
+class TestTypeRejectListFilter(TestCase):
+
+ def test_basics(self):
+ filter_a = TypeRejectlistFilter('not-a', set(('A')))
+
+ got = filter_a.process_source_zone(zone.copy())
+ self.assertEquals(['aaaa', 'txt', 'txt2'],
+ sorted([r.name for r in got.records]))
+
+ filter_aaaa = TypeRejectlistFilter('not-aaaa', ('AAAA',))
+ got = filter_aaaa.process_source_zone(zone.copy())
+ self.assertEquals(['a', 'a2', 'txt', 'txt2'],
+ sorted([r.name for r in got.records]))
+
+ filter_txt = TypeRejectlistFilter('not-txt', ['TXT'])
+ got = filter_txt.process_target_zone(zone.copy())
+ self.assertEquals(['a', 'a2', 'aaaa'],
+ sorted([r.name for r in got.records]))
+
+ filter_a_aaaa = TypeRejectlistFilter('not-a-aaaa', set(('A', 'AAAA')))
+ got = filter_a_aaaa.process_target_zone(zone.copy())
+ self.assertEquals(['txt', 'txt2'],
+ sorted([r.name for r in got.records]))
diff --git a/tests/test_octodns_processor_ownership.py b/tests/test_octodns_processor_ownership.py
new file mode 100644
index 0000000..e6b248b
--- /dev/null
+++ b/tests/test_octodns_processor_ownership.py
@@ -0,0 +1,146 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from unittest import TestCase
+
+from octodns.processor.ownership import OwnershipProcessor
+from octodns.record import Delete, Record
+from octodns.zone import Zone
+
+from helpers import PlannableProvider
+
+
+zone = Zone('unit.tests.', [])
+records = {}
+for record in [
+ Record.new(zone, '', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'values': [
+ '1.2.3.4',
+ '5.6.7.8',
+ ],
+ }),
+ Record.new(zone, 'the-a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ }),
+ Record.new(zone, 'the-aaaa', {
+ 'ttl': 30,
+ 'type': 'AAAA',
+ 'value': '::1',
+ }),
+ Record.new(zone, 'the-txt', {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': 'Hello World!',
+ }),
+ Record.new(zone, '*', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '4.3.2.1',
+ }),
+]:
+ records[record.name] = record
+ zone.add_record(record)
+
+
+class TestOwnershipProcessor(TestCase):
+
+ def test_process_source_zone(self):
+ ownership = OwnershipProcessor('ownership')
+
+ got = ownership.process_source_zone(zone.copy())
+ self.assertEquals([
+ '',
+ '*',
+ '_owner.a',
+ '_owner.a._wildcard',
+ '_owner.a.the-a',
+ '_owner.aaaa.the-aaaa',
+ '_owner.txt.the-txt',
+ 'the-a',
+ 'the-aaaa',
+ 'the-txt',
+ ], sorted([r.name for r in got.records]))
+
+ found = False
+ for record in got.records:
+ if record.name.startswith(ownership.txt_name):
+ self.assertEquals([ownership.txt_value], record.values)
+ # test _is_ownership while we're in here
+ self.assertTrue(ownership._is_ownership(record))
+ found = True
+ else:
+ self.assertFalse(ownership._is_ownership(record))
+ self.assertTrue(found)
+
+ def test_process_plan(self):
+ ownership = OwnershipProcessor('ownership')
+ provider = PlannableProvider('helper')
+
+ # No plan, is a quick noop
+ self.assertFalse(ownership.process_plan(None))
+
+ # Nothing exists create both records and ownership
+ ownership_added = ownership.process_source_zone(zone.copy())
+ plan = provider.plan(ownership_added)
+ self.assertTrue(plan)
+ # Double the number of records
+ self.assertEquals(len(records) * 2, len(plan.changes))
+ # Now process the plan, shouldn't make any changes, we're creating
+ # everything
+ got = ownership.process_plan(plan)
+ self.assertTrue(got)
+ self.assertEquals(len(records) * 2, len(got.changes))
+
+ # Something extra exists and doesn't have ownership TXT, leave it
+ # alone, we don't own it.
+ extra_a = Record.new(zone, 'extra-a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '4.4.4.4',
+ })
+ plan.existing.add_record(extra_a)
+ # If we'd done a "real" plan we'd have a delete for the extra thing.
+ plan.changes.append(Delete(extra_a))
+ # Process the plan, shouldn't make any changes since the extra bit is
+ # something we don't own
+ got = ownership.process_plan(plan)
+ self.assertTrue(got)
+ self.assertEquals(len(records) * 2, len(got.changes))
+
+ # Something extra exists and does have an ownership record so we will
+ # delete it...
+ copy = Zone('unit.tests.', [])
+ for record in records.values():
+ if record.name != 'the-a':
+ copy.add_record(record)
+ # New ownership, without the `the-a`
+ ownership_added = ownership.process_source_zone(copy)
+ self.assertEquals(len(records) * 2 - 2, len(ownership_added.records))
+ plan = provider.plan(ownership_added)
+ # Fake the extra existing by adding the record, its ownership, and the
+ # two delete changes.
+ the_a = records['the-a']
+ plan.existing.add_record(the_a)
+ name = '{}.a.the-a'.format(ownership.txt_name)
+ the_a_ownership = Record.new(zone, name, {
+ 'ttl': 30,
+ 'type': 'TXT',
+ 'value': ownership.txt_value,
+ })
+ plan.existing.add_record(the_a_ownership)
+ plan.changes.append(Delete(the_a))
+ plan.changes.append(Delete(the_a_ownership))
+ # Finally process the plan, should be a noop and we should get the same
+ # plan out, meaning the planned deletes were allowed to happen.
+ got = ownership.process_plan(plan)
+ self.assertTrue(got)
+ self.assertEquals(plan, got)
+ self.assertEquals(len(plan.changes), len(got.changes))
diff --git a/tests/test_octodns_provider_azuredns.py b/tests/test_octodns_provider_azuredns.py
index 1769cef..b3b52e5 100644
--- a/tests/test_octodns_provider_azuredns.py
+++ b/tests/test_octodns_provider_azuredns.py
@@ -5,19 +5,23 @@
from __future__ import absolute_import, division, print_function, \
unicode_literals
-from octodns.record import Create, Delete, Record
+from octodns.record import Create, Update, Delete, Record
from octodns.provider.azuredns import _AzureRecord, AzureProvider, \
- _check_endswith_dot, _parse_azure_type
+ _check_endswith_dot, _parse_azure_type, _root_traffic_manager_name, \
+ _get_monitor, _profile_is_match, AzureException
from octodns.zone import Zone
from octodns.provider.base import Plan
from azure.mgmt.dns.models import ARecord, AaaaRecord, CaaRecord, \
CnameRecord, MxRecord, SrvRecord, NsRecord, PtrRecord, TxtRecord, \
- RecordSet, SoaRecord, Zone as AzureZone
+ RecordSet, SoaRecord, SubResource, Zone as AzureZone
+from azure.mgmt.trafficmanager.models import Profile, DnsConfig, \
+ MonitorConfig, Endpoint, MonitorConfigCustomHeadersItem
from msrestazure.azure_exceptions import CloudError
+from six import text_type
from unittest import TestCase
-from mock import Mock, patch
+from mock import Mock, patch, call
zone = Zone(name='unit.tests.', sub_zones=[])
@@ -134,14 +138,31 @@ octo_records.append(Record.new(zone, 'txt2', {
'type': 'TXT',
'values': ['txt multiple test', 'txt multiple test 2']}))
+long_txt = "v=spf1 ip4:10.10.0.0/24 ip4:10.10.1.0/24 ip4:10.10.2.0/24"
+long_txt += " ip4:10.10.3.0/24 ip4:10.10.4.0/24 ip4:10.10.5.0/24 "
+long_txt += " 10.6.0/24 ip4:10.10.7.0/24 ip4:10.10.8.0/24 "
+long_txt += " ip4:10.10.10.0/24 ip4:10.10.11.0/24 ip4:10.10.12.0/24"
+long_txt += " ip4:10.10.13.0/24 ip4:10.10.14.0/24 ip4:10.10.15.0/24"
+long_txt += " ip4:10.10.16.0/24 ip4:10.10.17.0/24 ip4:10.10.18.0/24"
+long_txt += " ip4:10.10.19.0/24 ip4:10.10.20.0/24 ~all"
+octo_records.append(Record.new(zone, 'txt3', {
+ 'ttl': 10,
+ 'type': 'TXT',
+ 'values': ['txt multiple test', long_txt]}))
+
+octo_records.append(Record.new(zone, 'ptr2', {
+ 'ttl': 11,
+ 'type': 'PTR',
+ 'values': ['ptr21.unit.tests.', 'ptr22.unit.tests.']}))
+
azure_records = []
_base0 = _AzureRecord('TestAzure', octo_records[0])
_base0.zone_name = 'unit.tests'
_base0.relative_record_set_name = '@'
_base0.record_type = 'A'
_base0.params['ttl'] = 0
-_base0.params['arecords'] = [ARecord(ipv4_address='1.2.3.4'),
- ARecord(ipv4_address='10.10.10.10')]
+_base0.params['a_records'] = [ARecord(ipv4_address='1.2.3.4'),
+ ARecord(ipv4_address='10.10.10.10')]
azure_records.append(_base0)
_base1 = _AzureRecord('TestAzure', octo_records[1])
@@ -149,8 +170,8 @@ _base1.zone_name = 'unit.tests'
_base1.relative_record_set_name = 'a'
_base1.record_type = 'A'
_base1.params['ttl'] = 1
-_base1.params['arecords'] = [ARecord(ipv4_address='1.2.3.4'),
- ARecord(ipv4_address='1.1.1.1')]
+_base1.params['a_records'] = [ARecord(ipv4_address='1.2.3.4'),
+ ARecord(ipv4_address='1.1.1.1')]
azure_records.append(_base1)
_base2 = _AzureRecord('TestAzure', octo_records[2])
@@ -158,7 +179,7 @@ _base2.zone_name = 'unit.tests'
_base2.relative_record_set_name = 'aa'
_base2.record_type = 'A'
_base2.params['ttl'] = 9001
-_base2.params['arecords'] = ARecord(ipv4_address='1.2.4.3')
+_base2.params['a_records'] = ARecord(ipv4_address='1.2.4.3')
azure_records.append(_base2)
_base3 = _AzureRecord('TestAzure', octo_records[3])
@@ -166,7 +187,7 @@ _base3.zone_name = 'unit.tests'
_base3.relative_record_set_name = 'aaa'
_base3.record_type = 'A'
_base3.params['ttl'] = 2
-_base3.params['arecords'] = ARecord(ipv4_address='1.1.1.3')
+_base3.params['a_records'] = ARecord(ipv4_address='1.1.1.3')
azure_records.append(_base3)
_base4 = _AzureRecord('TestAzure', octo_records[4])
@@ -306,6 +327,31 @@ _base17.params['txt_records'] = [TxtRecord(value=['txt multiple test']),
TxtRecord(value=['txt multiple test 2'])]
azure_records.append(_base17)
+long_txt_az1 = "v=spf1 ip4:10.10.0.0/24 ip4:10.10.1.0/24 ip4:10.10.2.0/24"
+long_txt_az1 += " ip4:10.10.3.0/24 ip4:10.10.4.0/24 ip4:10.10.5.0/24 "
+long_txt_az1 += " 10.6.0/24 ip4:10.10.7.0/24 ip4:10.10.8.0/24 "
+long_txt_az1 += " ip4:10.10.10.0/24 ip4:10.10.11.0/24 ip4:10.10.12.0/24"
+long_txt_az1 += " ip4:10.10.13.0/24 ip4:10.10.14.0/24 ip4:10.10."
+long_txt_az2 = "15.0/24 ip4:10.10.16.0/24 ip4:10.10.17.0/24 ip4:10.10.18.0/24"
+long_txt_az2 += " ip4:10.10.19.0/24 ip4:10.10.20.0/24 ~all"
+_base18 = _AzureRecord('TestAzure', octo_records[18])
+_base18.zone_name = 'unit.tests'
+_base18.relative_record_set_name = 'txt3'
+_base18.record_type = 'TXT'
+_base18.params['ttl'] = 10
+_base18.params['txt_records'] = [TxtRecord(value=['txt multiple test']),
+ TxtRecord(value=[long_txt_az1, long_txt_az2])]
+azure_records.append(_base18)
+
+_base19 = _AzureRecord('TestAzure', octo_records[19])
+_base19.zone_name = 'unit.tests'
+_base19.relative_record_set_name = 'ptr2'
+_base19.record_type = 'PTR'
+_base19.params['ttl'] = 11
+_base19.params['ptr_records'] = [PtrRecord(ptrdname='ptr21.unit.tests.'),
+ PtrRecord(ptrdname='ptr22.unit.tests.')]
+azure_records.append(_base19)
+
class Test_AzureRecord(TestCase):
def test_azure_record(self):
@@ -315,6 +361,43 @@ class Test_AzureRecord(TestCase):
assert(azure_records[i]._equals(octo))
+class Test_DynamicAzureRecord(TestCase):
+ def test_azure_record(self):
+ tm_profile = Profile()
+ data = {
+ 'ttl': 60,
+ 'type': 'CNAME',
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': 'one.unit.tests.', 'weight': 1}
+ ],
+ 'fallback': 'two',
+ },
+ 'two': {
+ 'values': [
+ {'value': 'two.unit.tests.', 'weight': 1}
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF'], 'pool': 'one'},
+ {'pool': 'two'},
+ ],
+ }
+ }
+ octo_record = Record.new(zone, 'foo', data)
+ azure_record = _AzureRecord('TestAzure', octo_record,
+ traffic_manager=tm_profile)
+ self.assertEqual(azure_record.zone_name, zone.name[:-1])
+ self.assertEqual(azure_record.relative_record_set_name, 'foo')
+ self.assertEqual(azure_record.record_type, 'CNAME')
+ self.assertEqual(azure_record.params['ttl'], 60)
+ self.assertEqual(azure_record.params['target_resource'], tm_profile)
+
+
class Test_ParseAzureType(TestCase):
def test_parse_azure_type(self):
for expected, test in [['A', 'Microsoft.Network/dnszones/A'],
@@ -333,44 +416,366 @@ class Test_CheckEndswithDot(TestCase):
self.assertEquals(expected, _check_endswith_dot(test))
+class Test_RootTrafficManagerName(TestCase):
+ def test_root_traffic_manager_name(self):
+ test = Record.new(zone, 'foo', data={
+ 'ttl': 60, 'type': 'CNAME', 'value': 'default.unit.tests.',
+ })
+ self.assertEqual(_root_traffic_manager_name(test), 'foo--unit--tests')
+
+
+class Test_GetMonitor(TestCase):
+ def test_get_monitor(self):
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME', 'ttl': 60, 'value': 'default.unit.tests.',
+ 'octodns': {
+ 'healthcheck': {
+ 'path': '/_ping',
+ 'port': 4443,
+ 'protocol': 'HTTPS',
+ }
+ },
+ })
+
+ monitor = _get_monitor(record)
+ self.assertEqual(monitor.protocol, 'HTTPS')
+ self.assertEqual(monitor.port, 4443)
+ self.assertEqual(monitor.path, '/_ping')
+ headers = monitor.custom_headers
+ self.assertIsInstance(headers, list)
+ self.assertEquals(len(headers), 1)
+ headers = headers[0]
+ self.assertEqual(headers.name, 'Host')
+ self.assertEqual(headers.value, record.healthcheck_host())
+
+ # test TCP monitor
+ record._octodns['healthcheck']['protocol'] = 'TCP'
+ monitor = _get_monitor(record)
+ self.assertEqual(monitor.protocol, 'TCP')
+ self.assertIsNone(monitor.custom_headers)
+
+
+class Test_ProfileIsMatch(TestCase):
+ def test_profile_is_match(self):
+ is_match = _profile_is_match
+
+ self.assertFalse(is_match(None, Profile()))
+
+ # Profile object builder with default property values that can be
+ # overridden for testing below
+ def profile(
+ name = 'foo-unit-tests',
+ ttl = 60,
+ method = 'Geographic',
+ dns_name = None,
+ monitor_proto = 'HTTPS',
+ monitor_port = 4443,
+ monitor_path = '/_ping',
+ endpoints = 1,
+ endpoint_name = 'name',
+ endpoint_type = 'profile/nestedEndpoints',
+ target = 'target.unit.tests',
+ target_id = 'resource/id',
+ geos = ['GEO-AF'],
+ weight = 1,
+ priority = 1,
+ ):
+ dns = DnsConfig(relative_name=(dns_name or name), ttl=ttl)
+ return Profile(
+ name=name, traffic_routing_method=method, dns_config=dns,
+ monitor_config=MonitorConfig(
+ protocol=monitor_proto,
+ port=monitor_port,
+ path=monitor_path,
+ ),
+ endpoints=[Endpoint(
+ name=endpoint_name,
+ type=endpoint_type,
+ target=target,
+ target_resource_id=target_id,
+ geo_mapping=geos,
+ weight=weight,
+ priority=priority,
+ )] + [Endpoint()] * (endpoints - 1),
+ )
+
+ self.assertTrue(is_match(profile(), profile()))
+
+ self.assertFalse(is_match(profile(), profile(name='two')))
+ self.assertFalse(is_match(profile(), profile(endpoints=2)))
+ self.assertFalse(is_match(profile(), profile(dns_name='two')))
+ self.assertFalse(is_match(profile(), profile(monitor_proto='HTTP')))
+ self.assertFalse(is_match(profile(), profile(endpoint_name='a')))
+ self.assertFalse(is_match(profile(), profile(endpoint_type='b')))
+ self.assertFalse(
+ is_match(profile(endpoint_type='b'), profile(endpoint_type='b'))
+ )
+ self.assertFalse(is_match(profile(), profile(target_id='rsrc/id2')))
+ self.assertFalse(is_match(profile(), profile(geos=['IN'])))
+
+ def wprofile(**kwargs):
+ kwargs['method'] = 'Weighted'
+ kwargs['endpoint_type'] = 'profile/externalEndpoints'
+ return profile(**kwargs)
+
+ self.assertFalse(is_match(wprofile(), wprofile(target='bar.unit')))
+ self.assertFalse(is_match(wprofile(), wprofile(weight=3)))
+
+
class TestAzureDnsProvider(TestCase):
def _provider(self):
return self._get_provider('mock_spc', 'mock_dns_client')
+ @patch('octodns.provider.azuredns.TrafficManagerManagementClient')
@patch('octodns.provider.azuredns.DnsManagementClient')
+ @patch('octodns.provider.azuredns.ClientSecretCredential')
@patch('octodns.provider.azuredns.ServicePrincipalCredentials')
- def _get_provider(self, mock_spc, mock_dns_client):
+ def _get_provider(self, mock_spc, mock_css, mock_dns_client,
+ mock_tm_client):
'''Returns a mock AzureProvider object to use in testing.
:param mock_spc: placeholder
:type mock_spc: str
:param mock_dns_client: placeholder
:type mock_dns_client: str
+ :param mock_tm_client: placeholder
+ :type mock_tm_client: str
:type return: AzureProvider
'''
- return AzureProvider('mock_id', 'mock_client', 'mock_key',
- 'mock_directory', 'mock_sub', 'mock_rg')
+ provider = AzureProvider('mock_id', 'mock_client', 'mock_key',
+ 'mock_directory', 'mock_sub', 'mock_rg'
+ )
+
+ # Fetch the client to force it to load the creds
+ provider._dns_client
+
+ # set critical functions to return properly
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = []
+ tm_sync = provider._tm_client.profiles.create_or_update
+
+ def side_effect(rg, name, profile):
+ return Profile(
+ id=profile.id,
+ name=profile.name,
+ traffic_routing_method=profile.traffic_routing_method,
+ dns_config=profile.dns_config,
+ monitor_config=profile.monitor_config,
+ endpoints=profile.endpoints,
+ )
+
+ tm_sync.side_effect = side_effect
+
+ return provider
+
+ def _get_dynamic_record(self, zone):
+ return Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': 'one.unit.tests.', 'weight': 1},
+ ],
+ 'fallback': 'two',
+ },
+ 'two': {
+ 'values': [
+ {'value': 'two1.unit.tests.', 'weight': 3},
+ {'value': 'two2.unit.tests.', 'weight': 4},
+ ],
+ 'fallback': 'three',
+ },
+ 'three': {
+ 'values': [
+ {'value': 'three.unit.tests.', 'weight': 1},
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF', 'EU-DE', 'NA-US-CA', 'OC'], 'pool': 'one'},
+ {'pool': 'two'},
+ ],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'path': '/_ping',
+ 'port': 4443,
+ 'protocol': 'HTTPS',
+ }
+ },
+ })
+
+ def _get_tm_profiles(self, provider):
+ sub = provider._dns_client_subscription_id
+ rg = provider._resource_group
+ base_id = '/subscriptions/' + sub + \
+ '/resourceGroups/' + rg + \
+ '/providers/Microsoft.Network/trafficManagerProfiles/'
+ prefix = 'foo--unit--tests'
+ name_format = prefix + '-{}'
+ id_format = base_id + name_format
+
+ header = MonitorConfigCustomHeadersItem(name='Host',
+ value='foo.unit.tests')
+ monitor = MonitorConfig(protocol='HTTPS', port=4443, path='/_ping',
+ custom_headers=[header])
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+ nested = 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+
+ profiles = [
+ Profile(
+ id=id_format.format('pool-two'),
+ name=name_format.format('pool-two'),
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(ttl=60),
+ monitor_config=monitor,
+ endpoints=[
+ Endpoint(
+ name='two--two1.unit.tests',
+ type=external,
+ target='two1.unit.tests',
+ weight=3,
+ ),
+ Endpoint(
+ name='two--two2.unit.tests',
+ type=external,
+ target='two2.unit.tests',
+ weight=4,
+ ),
+ ],
+ ),
+ Profile(
+ id=id_format.format('rule-one'),
+ name=name_format.format('rule-one'),
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(ttl=60),
+ monitor_config=monitor,
+ endpoints=[
+ Endpoint(
+ name='one',
+ type=external,
+ target='one.unit.tests',
+ priority=1,
+ ),
+ Endpoint(
+ name='two',
+ type=nested,
+ target_resource_id=id_format.format('pool-two'),
+ priority=2,
+ ),
+ Endpoint(
+ name='three',
+ type=external,
+ target='three.unit.tests',
+ priority=3,
+ ),
+ Endpoint(
+ name='--default--',
+ type=external,
+ target='default.unit.tests',
+ priority=4,
+ ),
+ ],
+ ),
+ Profile(
+ id=id_format.format('rule-two'),
+ name=name_format.format('rule-two'),
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(ttl=60),
+ monitor_config=monitor,
+ endpoints=[
+ Endpoint(
+ name='two',
+ type=nested,
+ target_resource_id=id_format.format('pool-two'),
+ priority=1,
+ ),
+ Endpoint(
+ name='three',
+ type=external,
+ target='three.unit.tests',
+ priority=2,
+ ),
+ Endpoint(
+ name='--default--',
+ type=external,
+ target='default.unit.tests',
+ priority=3,
+ ),
+ ],
+ ),
+ Profile(
+ id=base_id + prefix,
+ name=prefix,
+ traffic_routing_method='Geographic',
+ dns_config=DnsConfig(ttl=60),
+ monitor_config=monitor,
+ endpoints=[
+ Endpoint(
+ geo_mapping=['GEO-AF', 'DE', 'US-CA', 'GEO-AP'],
+ name='one',
+ type=nested,
+ target_resource_id=id_format.format('rule-one'),
+ ),
+ Endpoint(
+ geo_mapping=['WORLD'],
+ name='two',
+ type=nested,
+ target_resource_id=id_format.format('rule-two'),
+ ),
+ ],
+ ),
+ ]
+
+ for profile in profiles:
+ profile.dns_config.relative_name = profile.name
+
+ return profiles
+
+ def _get_dynamic_package(self):
+ '''Convenience function to setup a sample dynamic record.
+ '''
+ provider = self._get_provider()
+
+ # setup traffic manager profiles
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = self._get_tm_profiles(provider)
+
+ # setup zone with dynamic record
+ zone = Zone(name='unit.tests.', sub_zones=[])
+ record = self._get_dynamic_record(zone)
+ zone.add_record(record)
+
+ # return everything
+ return provider, zone, record
def test_populate_records(self):
provider = self._get_provider()
rs = []
- recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1')])
+ recordSet = RecordSet(a_records=[ARecord(ipv4_address='1.1.1.1')])
recordSet.name, recordSet.ttl, recordSet.type = 'a1', 0, 'A'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
- recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1'),
- ARecord(ipv4_address='2.2.2.2')])
+ recordSet = RecordSet(a_records=[ARecord(ipv4_address='1.1.1.1'),
+ ARecord(ipv4_address='2.2.2.2')])
recordSet.name, recordSet.ttl, recordSet.type = 'a2', 1, 'A'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
aaaa1 = AaaaRecord(ipv6_address='1:1ec:1::1')
recordSet = RecordSet(aaaa_records=[aaaa1])
recordSet.name, recordSet.ttl, recordSet.type = 'aaaa1', 2, 'AAAA'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
aaaa2 = AaaaRecord(ipv6_address='1:1ec:1::2')
recordSet = RecordSet(aaaa_records=[aaaa1,
aaaa2])
recordSet.name, recordSet.ttl, recordSet.type = 'aaaa2', 3, 'AAAA'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
recordSet = RecordSet(caa_records=[CaaRecord(flags=0,
tag='issue',
@@ -388,9 +793,7 @@ class TestAzureDnsProvider(TestCase):
cname1 = CnameRecord(cname='cname.unit.test.')
recordSet = RecordSet(cname_record=cname1)
recordSet.name, recordSet.ttl, recordSet.type = 'cname1', 5, 'CNAME'
- rs.append(recordSet)
- recordSet = RecordSet(cname_record=None)
- recordSet.name, recordSet.ttl, recordSet.type = 'cname2', 6, 'CNAME'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
recordSet = RecordSet(mx_records=[MxRecord(preference=10,
exchange='mx1.unit.test.')])
@@ -413,9 +816,6 @@ class TestAzureDnsProvider(TestCase):
recordSet = RecordSet(ptr_records=[ptr1])
recordSet.name, recordSet.ttl, recordSet.type = 'ptr1', 11, 'PTR'
rs.append(recordSet)
- recordSet = RecordSet(ptr_records=[PtrRecord(ptrdname=None)])
- recordSet.name, recordSet.ttl, recordSet.type = 'ptr2', 12, 'PTR'
- rs.append(recordSet)
recordSet = RecordSet(srv_records=[SrvRecord(priority=1,
weight=2,
port=3,
@@ -434,59 +834,1493 @@ class TestAzureDnsProvider(TestCase):
rs.append(recordSet)
recordSet = RecordSet(txt_records=[TxtRecord(value='sample text1')])
recordSet.name, recordSet.ttl, recordSet.type = 'txt1', 15, 'TXT'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
recordSet = RecordSet(txt_records=[TxtRecord(value='sample text1'),
TxtRecord(value='sample text2')])
recordSet.name, recordSet.ttl, recordSet.type = 'txt2', 16, 'TXT'
+ recordSet.target_resource = SubResource()
rs.append(recordSet)
recordSet = RecordSet(soa_record=[SoaRecord()])
recordSet.name, recordSet.ttl, recordSet.type = '', 17, 'SOA'
rs.append(recordSet)
+ long_txt = "v=spf1 ip4:10.10.0.0/24 ip4:10.10.1.0/24 ip4:10.10.2.0/24"
+ long_txt += " ip4:10.10.3.0/24 ip4:10.10.4.0/24 ip4:10.10.5.0/24 "
+ long_txt += " 10.6.0/24 ip4:10.10.7.0/24 ip4:10.10.8.0/24 "
+ long_txt += " ip4:10.10.10.0/24 ip4:10.10.11.0/24 ip4:10.10.12.0/24"
+ long_txt += " ip4:10.10.13.0/24 ip4:10.10.14.0/24 ip4:10.10.15.0/24"
+ long_txt += " ip4:10.10.16.0/24 ip4:10.10.17.0/24 ip4:10.10.18.0/24"
+ long_txt += " ip4:10.10.19.0/24 ip4:10.10.20.0/24 ~all"
+ recordSet = RecordSet(txt_records=[TxtRecord(value='sample value1'),
+ TxtRecord(value=long_txt)])
+ recordSet.name, recordSet.ttl, recordSet.type = 'txt3', 18, 'TXT'
+ recordSet.target_resource = SubResource()
+ rs.append(recordSet)
record_list = provider._dns_client.record_sets.list_by_dns_zone
record_list.return_value = rs
+ zone_list = provider._dns_client.zones.list_by_resource_group
+ zone_list.return_value = [zone]
+
exists = provider.populate(zone)
- self.assertTrue(exists)
- self.assertEquals(len(zone.records), 18)
+ self.assertEquals(len(zone.records), 17)
+ self.assertTrue(exists)
def test_populate_zone(self):
provider = self._get_provider()
zone_list = provider._dns_client.zones.list_by_resource_group
- zone_list.return_value = [AzureZone(location='global'),
- AzureZone(location='global')]
+ zone_1 = AzureZone(location='global')
+ # This is far from ideal but the
+ # zone constructor doesn't let me set it on creation
+ zone_1.name = "zone-1"
+ zone_2 = AzureZone(location='global')
+ # This is far from ideal but the
+ # zone constructor doesn't let me set it on creation
+ zone_2.name = "zone-2"
+ zone_list.return_value = [zone_1,
+ zone_2,
+ zone_1]
provider._populate_zones()
- self.assertEquals(len(provider._azure_zones), 1)
+ # This should be returning two zones since two zones are the same
+ self.assertEquals(len(provider._azure_zones), 2)
def test_bad_zone_response(self):
provider = self._get_provider()
_get = provider._dns_client.zones.get
_get.side_effect = CloudError(Mock(status=404), 'Azure Error')
- trip = False
- try:
- provider._check_zone('unit.test', create=False)
- except CloudError:
- trip = True
- self.assertEquals(trip, True)
+ self.assertEquals(
+ provider._check_zone('unit.test', create=False),
+ None
+ )
+
+ def test_extra_changes(self):
+ provider, existing, record = self._get_dynamic_package()
+
+ # test simple records produce no extra changes
+ desired = Zone(name=existing.name, sub_zones=[])
+ simple = Record.new(desired, 'simple', data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ })
+ desired.add_record(simple)
+ extra = provider._extra_changes(desired, desired, [Create(simple)])
+ self.assertEqual(len(extra), 0)
+
+ # test an unchanged dynamic record produces no extra changes
+ desired.add_record(record)
+ extra = provider._extra_changes(existing, desired, [])
+ self.assertEqual(len(extra), 0)
+
+ # test unused TM produces the extra change for clean up
+ sample_profile = self._get_tm_profiles(provider)[0]
+ tm_id = provider._profile_name_to_id
+ root_profile_name = _root_traffic_manager_name(record)
+ extra_profile = Profile(
+ id=tm_id('{}-pool-random'.format(root_profile_name)),
+ name='{}-pool-random'.format(root_profile_name),
+ traffic_routing_method='Weighted',
+ dns_config=sample_profile.dns_config,
+ monitor_config=sample_profile.monitor_config,
+ endpoints=sample_profile.endpoints,
+ )
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value.append(extra_profile)
+ provider._populate_traffic_managers()
+ extra = provider._extra_changes(existing, desired, [])
+ self.assertEqual(len(extra), 1)
+ extra = extra[0]
+ self.assertIsInstance(extra, Update)
+ self.assertEqual(extra.new, record)
+ desired._remove_record(record)
+ tm_list.return_value.pop()
+
+ # test new dynamic record does not produce an extra change for it
+ new_dynamic = Record.new(desired, record.name + '2', data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': record.dynamic._data(),
+ 'octodns': record._octodns,
+ })
+ # test change in healthcheck by using a different port number
+ update_dynamic = Record.new(desired, record.name, data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': record.dynamic._data(),
+ 'octodns': {
+ 'healthcheck': {
+ 'path': '/_ping',
+ 'port': 443,
+ 'protocol': 'HTTPS',
+ },
+ },
+ })
+ desired.add_record(new_dynamic)
+ desired.add_record(update_dynamic)
+ changes = [Create(new_dynamic)]
+ extra = provider._extra_changes(existing, desired, changes)
+ # implicitly asserts that new_dynamic was not added to extra changes
+ # as it was already in the `changes` list
+ self.assertEqual(len(extra), 1)
+ extra = extra[0]
+ self.assertIsInstance(extra, Update)
+ self.assertEqual(extra.new, update_dynamic)
+
+ # test dynamic record of unsupported type throws exception
+ unsupported_dynamic = Record.new(desired, record.name + '3', data={
+ 'type': 'DNAME',
+ 'ttl': record.ttl,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {'values': [{'value': 'one.unit.tests.'}]},
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ],
+ },
+ })
+ desired.add_record(unsupported_dynamic)
+ changes = [Create(unsupported_dynamic)]
+ with self.assertRaises(AzureException) as ctx:
+ provider._extra_changes(existing, desired, changes)
+ self.assertTrue(text_type(ctx).endswith(
+ 'must be of type CNAME'
+ ))
+ desired._remove_record(unsupported_dynamic)
+
+ # test colliding ATM names throws exception
+ record1 = Record.new(desired, 'sub.www', data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': record.dynamic._data(),
+ })
+ record2 = Record.new(desired, 'sub--www', data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': record.dynamic._data(),
+ })
+ desired.add_record(record1)
+ desired.add_record(record2)
+ changes = [Create(record1), Create(record2)]
+ with self.assertRaises(AzureException) as ctx:
+ provider._extra_changes(existing, desired, changes)
+ self.assertTrue(text_type(ctx).startswith(
+ 'Collision in Traffic Manager'
+ ))
+
+ @patch(
+ 'octodns.provider.azuredns.AzureProvider._generate_traffic_managers')
+ def test_extra_changes_non_last_fallback_contains_default(self, mock_gtm):
+ provider = self._get_provider()
+
+ desired = Zone(zone.name, sub_zones=[])
+ record = Record.new(desired, 'foo', {
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{'value': 'one.unit.tests.'}],
+ 'fallback': 'def',
+ },
+ 'def': {
+ 'values': [{'value': 'default.unit.tests.'}],
+ 'fallback': 'two',
+ },
+ 'two': {
+ 'values': [{'value': 'two.unit.tests.'}],
+ },
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ]
+ }
+ })
+ desired.add_record(record)
+ changes = [Create(record)]
+
+ # assert that no exception is raised
+ provider._extra_changes(zone, desired, changes)
+
+ # simulate duplicate endpoint and assert exception
+ endpoint = Endpoint(target='dup.unit.tests.')
+ mock_gtm.return_value = [Profile(
+ name='test-profile',
+ endpoints=[endpoint, endpoint],
+ )]
+ with self.assertRaises(AzureException) as ctx:
+ provider._extra_changes(zone, desired, changes)
+ self.assertTrue('duplicate endpoint' in text_type(ctx))
+
+ def test_extra_changes_A_multi_defaults(self):
+ provider = self._get_provider()
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'A',
+ 'ttl': 60,
+ 'values': ['1.1.1.1', '8.8.8.8'],
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{'value': '1.1.1.1'}],
+ },
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ],
+ }
+ })
+
+ # test that extra changes doesn't show any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ with self.assertRaises(AzureException) as ctx:
+ provider._extra_changes(zone, desired, [])
+ self.assertEqual('single value' in text_type(ctx))
+
+ def test_generate_tm_profile(self):
+ provider, zone, record = self._get_dynamic_package()
+ profile_gen = provider._generate_tm_profile
+
+ label = 'foobar'
+ routing = 'Priority'
+ endpoints = [
+ Endpoint(target='one.unit.tests'),
+ Endpoint(target_resource_id='/s/1/rg/foo/tm/foobar2'),
+ Endpoint(name='invalid'),
+ ]
+
+ # invalid endpoint raises exception
+ with self.assertRaises(AzureException):
+ profile_gen(routing, endpoints, record, label)
+
+ # regular test
+ endpoints.pop()
+ profile = profile_gen(routing, endpoints, record, label)
+
+ # implicitly tests _profile_name_to_id
+ sub = provider._dns_client_subscription_id
+ rg = provider._resource_group
+ expected_name = 'foo--unit--tests-rule-foobar'
+ expected_id = '/subscriptions/' + sub + \
+ '/resourceGroups/' + rg + \
+ '/providers/Microsoft.Network/trafficManagerProfiles/' + \
+ expected_name
+ self.assertEqual(profile.id, expected_id)
+ self.assertEqual(profile.name, expected_name)
+ self.assertEqual(profile.name, profile.dns_config.relative_name)
+ self.assertEqual(profile.traffic_routing_method, routing)
+ self.assertEqual(profile.dns_config.ttl, record.ttl)
+ self.assertEqual(len(profile.endpoints), len(endpoints))
+
+ self.assertEqual(
+ profile.endpoints[0].type,
+ 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+ )
+ self.assertEqual(
+ profile.endpoints[1].type,
+ 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+ )
+
+ def test_dynamic_record(self):
+ provider, zone, record = self._get_dynamic_package()
+ profiles = provider._generate_traffic_managers(record)
+
+ # check that every profile is a match with what we expect
+ expected_profiles = self._get_tm_profiles(provider)
+ self.assertEqual(len(expected_profiles), len(profiles))
+ for have, expected in zip(profiles, expected_profiles):
+ self.assertTrue(_profile_is_match(have, expected))
+
+ # check that dynamic record is populated back from profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+ # test that extra changes doesn't show any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_generate_traffic_managers_middle_east(self):
+ # check Asia/Middle East test case
+ provider, zone, record = self._get_dynamic_package()
+ record.dynamic._data()['rules'][0]['geos'].append('AS')
+ profiles = provider._generate_traffic_managers(record)
+ self.assertIn('GEO-ME', profiles[-1].endpoints[0].geo_mapping)
+ self.assertIn('GEO-AS', profiles[-1].endpoints[0].geo_mapping)
+
+ def test_populate_dynamic_middle_east(self):
+ # Middle east without Asia raises exception
+ provider, zone, record = self._get_dynamic_package()
+ tm_suffix = _root_traffic_manager_name(record)
+ tm_id = provider._profile_name_to_id
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = [
+ Profile(
+ id=tm_id(tm_suffix),
+ name=tm_suffix,
+ traffic_routing_method='Geographic',
+ endpoints=[
+ Endpoint(
+ geo_mapping=['GEO-ME'],
+ ),
+ ],
+ ),
+ ]
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=tm_id(tm_suffix)),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ with self.assertRaises(AzureException) as ctx:
+ provider._populate_record(zone, azrecord)
+ self.assertTrue(text_type(ctx).startswith(
+ 'Middle East (GEO-ME) is not supported'
+ ))
+
+ # valid profiles with Middle East test case
+ provider, zone, record = self._get_dynamic_package()
+ geo_profile = provider._get_tm_for_dynamic_record(record)
+ geo_profile.endpoints[0].geo_mapping.extend(['GEO-ME', 'GEO-AS'])
+ record = provider._populate_record(zone, azrecord)
+ self.assertIn('AS', record.dynamic.rules[0].data['geos'])
+ self.assertNotIn('ME', record.dynamic.rules[0].data['geos'])
+
+ def test_dynamic_no_geo(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': 'one.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 1)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='one',
+ type=external,
+ target='one.unit.tests',
+ priority=1,
+ ),
+ Endpoint(
+ name='--default--',
+ type=external,
+ target='default.unit.tests',
+ priority=2,
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+ # test that extra changes doesn't show any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_fallback_is_default(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'def': {
+ 'values': [
+ {'value': 'default.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF'], 'pool': 'def'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 1)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Geographic',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='def--default--',
+ type=external,
+ target='default.unit.tests',
+ geo_mapping=['GEO-AF'],
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_pool_contains_default(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+ nested = 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'rr': {
+ 'values': [
+ {'value': 'one.unit.tests.'},
+ {'value': 'two.unit.tests.'},
+ {'value': 'default.unit.tests.'},
+ {'value': 'final.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF'], 'pool': 'rr'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 2)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests-pool-rr',
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-pool-rr', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='rr--one.unit.tests',
+ type=external,
+ target='one.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--two.unit.tests',
+ type=external,
+ target='two.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--default.unit.tests--default--',
+ type=external,
+ target='default.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--final.unit.tests',
+ type=external,
+ target='final.unit.tests',
+ weight=1,
+ ),
+ ],
+ )))
+ self.assertTrue(_profile_is_match(profiles[1], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Geographic',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='rr',
+ type=nested,
+ target_resource_id=profiles[0].id,
+ geo_mapping=['GEO-AF'],
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_pool_contains_default_no_geo(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'rr': {
+ 'values': [
+ {'value': 'one.unit.tests.'},
+ {'value': 'two.unit.tests.'},
+ {'value': 'default.unit.tests.'},
+ {'value': 'final.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'pool': 'rr'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 1)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='rr--one.unit.tests',
+ type=external,
+ target='one.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--two.unit.tests',
+ type=external,
+ target='two.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--default.unit.tests--default--',
+ type=external,
+ target='default.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--final.unit.tests',
+ type=external,
+ target='final.unit.tests',
+ weight=1,
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_last_pool_contains_default_no_geo(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+ nested = 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'cloud': {
+ 'values': [
+ {'value': 'cloud.unit.tests.'},
+ ],
+ 'fallback': 'rr',
+ },
+ 'rr': {
+ 'values': [
+ {'value': 'one.unit.tests.'},
+ {'value': 'two.unit.tests.'},
+ {'value': 'default.unit.tests.'},
+ {'value': 'final.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'pool': 'cloud'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 2)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests-pool-rr',
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-pool-rr', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='rr--one.unit.tests',
+ type=external,
+ target='one.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--two.unit.tests',
+ type=external,
+ target='two.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--default.unit.tests--default--',
+ type=external,
+ target='default.unit.tests',
+ weight=1,
+ ),
+ Endpoint(
+ name='rr--final.unit.tests',
+ type=external,
+ target='final.unit.tests',
+ weight=1,
+ ),
+ ],
+ )))
+ self.assertTrue(_profile_is_match(profiles[1], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=60),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='cloud',
+ type=external,
+ target='cloud.unit.tests',
+ priority=1,
+ ),
+ Endpoint(
+ name='rr',
+ type=nested,
+ target_resource_id=profiles[0].id,
+ priority=2,
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_unique_traffic_managers(self):
+ record = self._get_dynamic_record(zone)
+ data = {
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': record.dynamic._data()
+ }
+ record_names = [
+ 'www.foo', 'www-foo'
+ ]
+ provider = self._get_provider()
+
+ seen = set()
+ for name in record_names:
+ record = Record.new(zone, name, data=data)
+ tms = provider._generate_traffic_managers(record)
+ for tm in tms:
+ self.assertNotIn(tm.name, seen)
+ seen.add(tm.name)
+
+ def test_dynamic_reused_pool(self):
+ # test that traffic managers are generated as expected
+ provider = self._get_provider()
+ nested = 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'CNAME',
+ 'ttl': 60,
+ 'value': 'default.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'sto': {
+ 'values': [
+ {'value': 'sto.unit.tests.'},
+ ],
+ 'fallback': 'iad',
+ },
+ 'iad': {
+ 'values': [
+ {'value': 'iad.unit.tests.'},
+ ],
+ 'fallback': 'lhr',
+ },
+ 'lhr': {
+ 'values': [
+ {'value': 'lhr.unit.tests.'},
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['EU'], 'pool': 'iad'},
+ {'geos': ['EU-GB'], 'pool': 'lhr'},
+ {'geos': ['EU-SE'], 'pool': 'sto'},
+ {'pool': 'lhr'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 4)
+ self.assertTrue(_profile_is_match(profiles[-1], Profile(
+ name='foo--unit--tests',
+ traffic_routing_method='Geographic',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='iad',
+ type=nested,
+ target_resource_id=profiles[0].id,
+ geo_mapping=['GEO-EU'],
+ ),
+ Endpoint(
+ name='lhr',
+ type=nested,
+ target_resource_id=profiles[1].id,
+ geo_mapping=['GB', 'WORLD'],
+ ),
+ Endpoint(
+ name='sto',
+ type=nested,
+ target_resource_id=profiles[2].id,
+ geo_mapping=['SE'],
+ ),
+ ],
+ )))
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_A(self):
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+ nested = 'Microsoft.Network/trafficManagerProfiles/nestedEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'A',
+ 'ttl': 60,
+ 'values': ['9.9.9.9'],
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': '11.11.11.11'},
+ {'value': '12.12.12.12'},
+ ],
+ 'fallback': 'two'
+ },
+ 'two': {
+ 'values': [
+ {'value': '2.2.2.2'},
+ ],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF'], 'pool': 'one'},
+ {'pool': 'two'},
+ ],
+ }
+ })
+
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 4)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests-A-pool-one',
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-a-pool-one', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='one--11.11.11.11',
+ type=external,
+ target='11.11.11.11',
+ weight=1,
+ ),
+ Endpoint(
+ name='one--12.12.12.12',
+ type=external,
+ target='12.12.12.12',
+ weight=1,
+ ),
+ ],
+ )))
+ self.assertTrue(_profile_is_match(profiles[1], Profile(
+ name='foo--unit--tests-A-rule-one',
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-a-rule-one', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='one',
+ type=nested,
+ target_resource_id=profiles[0].id,
+ priority=1,
+ ),
+ Endpoint(
+ name='two',
+ type=external,
+ target='2.2.2.2',
+ priority=2,
+ ),
+ Endpoint(
+ name='--default--',
+ type=external,
+ target='9.9.9.9',
+ priority=3,
+ ),
+ ],
+ )))
+ self.assertTrue(_profile_is_match(profiles[2], Profile(
+ name='foo--unit--tests-A-rule-two',
+ traffic_routing_method='Priority',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-a-rule-two', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='two',
+ type=external,
+ target='2.2.2.2',
+ priority=1,
+ ),
+ Endpoint(
+ name='--default--',
+ type=external,
+ target='9.9.9.9',
+ priority=2,
+ ),
+ ],
+ )))
+ self.assertTrue(_profile_is_match(profiles[3], Profile(
+ name='foo--unit--tests-A',
+ traffic_routing_method='Geographic',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-a', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='one',
+ type=nested,
+ target_resource_id=profiles[1].id,
+ geo_mapping=['GEO-AF'],
+ ),
+ Endpoint(
+ name='two',
+ type=nested,
+ target_resource_id=profiles[2].id,
+ geo_mapping=['WORLD'],
+ ),
+ ],
+ )))
+
+ # test that the record and ATM profile gets created
+ tm_sync = provider._tm_client.profiles.create_or_update
+ create = provider._dns_client.record_sets.create_or_update
+ provider._apply_Create(Create(record))
+ self.assertEqual(tm_sync.call_count, len(profiles) + 1)
+ create.assert_called_once()
+
+ # test broken alias
+ azrecord = RecordSet(
+ ttl=60, target_resource=SubResource(id=None))
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.values, ['255.255.255.255'])
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ provider._populate_traffic_managers()
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_dynamic_AAAA(self):
+ provider = self._get_provider()
+ external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
+
+ record = Record.new(zone, 'foo', data={
+ 'type': 'AAAA',
+ 'ttl': 60,
+ 'values': ['1::1', '2::2'],
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': '1::1'},
+ {'value': '2::2'},
+ ],
+ },
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ],
+ }
+ })
+ profiles = provider._generate_traffic_managers(record)
+
+ self.assertEqual(len(profiles), 1)
+ self.assertTrue(_profile_is_match(profiles[0], Profile(
+ name='foo--unit--tests-AAAA',
+ traffic_routing_method='Weighted',
+ dns_config=DnsConfig(
+ relative_name='foo--unit--tests-aaaa', ttl=record.ttl),
+ monitor_config=_get_monitor(record),
+ endpoints=[
+ Endpoint(
+ name='one--1--1--default--',
+ type=external,
+ target='1::1',
+ weight=1,
+ ),
+ Endpoint(
+ name='one--2--2--default--',
+ type=external,
+ target='2::2',
+ weight=1,
+ ),
+ ],
+ )))
+
+ # test that the record and ATM profile gets created
+ tm_sync = provider._tm_client.profiles.create_or_update
+ create = provider._dns_client.record_sets.create_or_update
+ provider._apply_Create(Create(record))
+ # A dynamic record can only have 1 profile
+ tm_sync.assert_called_once()
+ create.assert_called_once()
+
+ # test broken alias
+ azrecord = RecordSet(
+ ttl=60, target_resource=SubResource(id=None))
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.values, ['::1'])
+
+ # test that same record gets populated back from traffic managers
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = profiles
+ azrecord = RecordSet(
+ ttl=60,
+ target_resource=SubResource(id=profiles[-1].id),
+ )
+ azrecord.name = record.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record._type)
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.dynamic._data(), record.dynamic._data())
+
+        # test that _extra_changes doesn't report any changes
+ desired = Zone(zone.name, sub_zones=[])
+ desired.add_record(record)
+ changes = provider._extra_changes(zone, desired, [])
+ self.assertEqual(len(changes), 0)
+
+ def test_sync_traffic_managers(self):
+ provider, zone, record = self._get_dynamic_package()
+ provider._populate_traffic_managers()
+
+ tm_sync = provider._tm_client.profiles.create_or_update
+
+ prefix = 'foo--unit--tests'
+ expected_seen = {
+ prefix, '{}-pool-two'.format(prefix),
+ '{}-rule-one'.format(prefix), '{}-rule-two'.format(prefix),
+ }
+
+ # test no change
+ profiles = provider._generate_traffic_managers(record)
+ seen = provider._sync_traffic_managers(profiles)
+ self.assertEqual(seen, expected_seen)
+ tm_sync.assert_not_called()
+
+ # test that changing weight causes update API call
+ dynamic = record.dynamic._data()
+ dynamic['pools']['two']['values'][0]['weight'] = 14
+ data = {
+ 'type': 'CNAME',
+ 'ttl': record.ttl,
+ 'value': record.value,
+ 'dynamic': dynamic,
+ 'octodns': record._octodns,
+ }
+ new_record = Record.new(zone, record.name, data)
+ tm_sync.reset_mock()
+ profiles = provider._generate_traffic_managers(new_record)
+ seen2 = provider._sync_traffic_managers(profiles)
+ self.assertEqual(seen2, expected_seen)
+ tm_sync.assert_called_once()
+
+ # test that new profile was successfully inserted in cache
+ new_profile = provider._get_tm_profile_by_name(
+ '{}-pool-two'.format(prefix)
+ )
+ self.assertEqual(new_profile.endpoints[0].weight, 14)
+
+ def test_sync_traffic_managers_duplicate(self):
+ provider, zone, record = self._get_dynamic_package()
+ tm_sync = provider._tm_client.profiles.create_or_update
+
+ # change and duplicate profiles
+ profile = self._get_tm_profiles(provider)[0]
+ profile.name = 'changing_this_to_trigger_sync'
+ provider._sync_traffic_managers([profile, profile])
+
+ # it should only be called once for duplicate profiles
+ tm_sync.assert_called_once()
+
+ def test_find_traffic_managers(self):
+ provider, zone, record = self._get_dynamic_package()
+
+ # insert a non-matching profile
+ sample_profile = self._get_tm_profiles(provider)[0]
+ # dummy record for generating suffix
+ record2 = Record.new(zone, record.name + '2', data={
+ 'type': record._type,
+ 'ttl': record.ttl,
+ 'value': record.value,
+ })
+ prefix2 = _root_traffic_manager_name(record2)
+ tm_id = provider._profile_name_to_id
+ extra_profile = Profile(
+ id=tm_id('{}-pool-random'.format(prefix2)),
+ name='{}-pool-random'.format(prefix2),
+ traffic_routing_method='Weighted',
+ dns_config=sample_profile.dns_config,
+ monitor_config=sample_profile.monitor_config,
+ endpoints=sample_profile.endpoints,
+ )
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value.append(extra_profile)
+ provider._populate_traffic_managers()
+
+ # implicitly asserts that non-matching profile is not included
+ prefix = _root_traffic_manager_name(record)
+ self.assertEqual(provider._find_traffic_managers(record), {
+ prefix, '{}-pool-two'.format(prefix),
+ '{}-rule-one'.format(prefix), '{}-rule-two'.format(prefix),
+ })
+
+ def test_traffic_manager_gc(self):
+ provider, zone, record = self._get_dynamic_package()
+ provider._populate_traffic_managers()
+
+ profiles = provider._find_traffic_managers(record)
+ profile_delete_mock = provider._tm_client.profiles.delete
+
+ provider._traffic_managers_gc(record, profiles)
+ profile_delete_mock.assert_not_called()
+
+ profile_delete_mock.reset_mock()
+ remove = list(profiles)[3]
+ profiles.discard(remove)
+
+ provider._traffic_managers_gc(record, profiles)
+ profile_delete_mock.assert_has_calls(
+ [call(provider._resource_group, remove)]
+ )
def test_apply(self):
provider = self._get_provider()
- changes = []
- deletes = []
- for i in octo_records:
- changes.append(Create(i))
- deletes.append(Delete(i))
+ expected_n = len(octo_records)
+ half = int(expected_n / 2)
+ changes = [Create(r) for r in octo_records[:half]] + \
+ [Update(r, r) for r in octo_records[half:]]
+ deletes = [Delete(r) for r in octo_records]
+
+ self.assertEquals(expected_n, provider.apply(Plan(None, zone,
+ changes, True)))
+ self.assertEquals(expected_n, provider.apply(Plan(zone, zone,
+ deletes, True)))
+
+ def test_apply_create_dynamic(self):
+ provider = self._get_provider()
+
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = []
+
+ tm_sync = provider._tm_client.profiles.create_or_update
+
+ record = self._get_dynamic_record(zone)
+
+ profiles = self._get_tm_profiles(provider)
+
+ provider._apply_Create(Create(record))
+        # create was called as many times as the number of profiles
+        # required for the dynamic record
+ self.assertEqual(tm_sync.call_count, len(profiles))
+
+ create = provider._dns_client.record_sets.create_or_update
+ create.assert_called_once()
+
+ def test_apply_update_dynamic(self):
+ # existing is simple, new is dynamic
+ provider = self._get_provider()
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value = []
+ profiles = self._get_tm_profiles(provider)
+ dynamic_record = self._get_dynamic_record(zone)
+ simple_record = Record.new(zone, dynamic_record.name, data={
+ 'type': 'CNAME',
+ 'ttl': 3600,
+ 'value': 'cname.unit.tests.',
+ })
+ change = Update(simple_record, dynamic_record)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ self.assertEqual(tm_sync.call_count, len(profiles))
+ dns_update.assert_called_once()
+ tm_delete.assert_not_called()
+
+ # existing is dynamic, new is simple
+ provider, existing, dynamic_record = self._get_dynamic_package()
+ profiles = self._get_tm_profiles(provider)
+ change = Update(dynamic_record, simple_record)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ tm_sync.assert_not_called()
+ dns_update.assert_called_once()
+ self.assertEqual(tm_delete.call_count, len(profiles))
+
+ # both are dynamic, healthcheck port is changed
+ provider, existing, dynamic_record = self._get_dynamic_package()
+ profiles = self._get_tm_profiles(provider)
+ dynamic_record2 = self._get_dynamic_record(existing)
+ dynamic_record2._octodns['healthcheck']['port'] += 1
+ change = Update(dynamic_record, dynamic_record2)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ self.assertEqual(tm_sync.call_count, len(profiles))
+ dns_update.assert_not_called()
+ tm_delete.assert_not_called()
+
+ # both are dynamic, extra profile should be deleted
+ provider, existing, dynamic_record = self._get_dynamic_package()
+ sample_profile = self._get_tm_profiles(provider)[0]
+ tm_id = provider._profile_name_to_id
+ root_profile_name = _root_traffic_manager_name(dynamic_record)
+ extra_profile = Profile(
+ id=tm_id('{}-pool-random'.format(root_profile_name)),
+ name='{}-pool-random'.format(root_profile_name),
+ traffic_routing_method='Weighted',
+ dns_config=sample_profile.dns_config,
+ monitor_config=sample_profile.monitor_config,
+ endpoints=sample_profile.endpoints,
+ )
+ tm_list = provider._tm_client.profiles.list_by_resource_group
+ tm_list.return_value.append(extra_profile)
+ change = Update(dynamic_record, dynamic_record)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ tm_sync.assert_not_called()
+ dns_update.assert_not_called()
+ tm_delete.assert_called_once()
+
+ # both are dynamic but alias is broken
+ provider, existing, record1 = self._get_dynamic_package()
+ azrecord = RecordSet(
+ ttl=record1.ttl, target_resource=SubResource(id=None))
+ azrecord.name = record1.name or '@'
+ azrecord.type = 'Microsoft.Network/dnszones/{}'.format(record1._type)
+
+ record2 = provider._populate_record(zone, azrecord)
+ self.assertEqual(record2.value, 'iam.invalid.')
+
+ change = Update(record2, record1)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ tm_sync.assert_not_called()
+ dns_update.assert_called_once()
+ tm_delete.assert_not_called()
+
+ def test_apply_update_dynamic_A(self):
+ # existing is simple, new is dynamic
+ provider = self._get_provider()
+ simple_record = Record.new(zone, 'foo', data={
+ 'type': 'A',
+ 'ttl': 3600,
+ 'values': ['1.1.1.1', '2.2.2.2'],
+ })
+ dynamic_record = Record.new(zone, simple_record.name, data={
+ 'type': 'A',
+ 'ttl': 60,
+ 'values': ['1.1.1.1'],
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': '8.8.8.8'},
+ {'value': '4.4.4.4'},
+ ],
+ 'fallback': 'two',
+ },
+ 'two': {
+ 'values': [{'value': '9.9.9.9'}],
+ },
+ },
+ 'rules': [
+ {'geos': ['AF'], 'pool': 'two'},
+ {'pool': 'one'},
+ ],
+ }
+ })
+ num_tms = len(provider._generate_traffic_managers(dynamic_record))
+ change = Update(simple_record, dynamic_record)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ # sync is called once for each profile, plus 1 at the end for nested
+ # endpoints to workaround A/AAAA nesting limitation in Azure
+ self.assertEqual(tm_sync.call_count, num_tms + 1)
+ dns_update.assert_called_once()
+ tm_delete.assert_not_called()
+
+ # both are dynamic, healthcheck port is changed to trigger sync on
+ # all profiles
+ provider = self._get_provider()
+ dynamic_record2 = Record.new(zone, dynamic_record.name, data={
+ 'type': dynamic_record._type,
+ 'ttl': 300,
+ 'values': dynamic_record.values,
+ 'dynamic': dynamic_record.dynamic._data(),
+ 'octodns': {
+ 'healthcheck': {'port': 4433},
+ }
+ })
+ change = Update(dynamic_record, dynamic_record2)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ # sync is called once for each profile, extra call at the end is not
+ # needed when existing dynamic record is already aliased to its root
+ # profile
+ self.assertEqual(tm_sync.call_count, num_tms)
+ dns_update.assert_not_called()
+ tm_delete.assert_not_called()
+
+ def test_apply_update_dynamic_A_singluar(self):
+ # existing is simple, new is dynamic that needs only one profile
+ provider = self._get_provider()
+ simple_record = Record.new(zone, 'foo', data={
+ 'type': 'A',
+ 'ttl': 3600,
+ 'values': ['1.1.1.1', '2.2.2.2'],
+ })
+ dynamic_record = Record.new(zone, simple_record.name, data={
+ 'type': 'A',
+ 'ttl': 60,
+ 'values': ['1.1.1.1'],
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [
+ {'value': '8.8.8.8'},
+ {'value': '1.1.1.1'},
+ ],
+ },
+ },
+ 'rules': [
+ {'pool': 'one'},
+ ],
+ }
+ })
+ num_tms = len(provider._generate_traffic_managers(dynamic_record))
+ self.assertEqual(num_tms, 1)
+ change = Update(simple_record, dynamic_record)
+ provider._apply_Update(change)
+ tm_sync, dns_update, tm_delete = (
+ provider._tm_client.profiles.create_or_update,
+ provider._dns_client.record_sets.create_or_update,
+ provider._tm_client.profiles.delete
+ )
+ self.assertEqual(tm_sync.call_count, num_tms)
+ dns_update.assert_called_once()
+ tm_delete.assert_not_called()
- self.assertEquals(18, provider.apply(Plan(None, zone,
- changes, True)))
- self.assertEquals(18, provider.apply(Plan(zone, zone,
- deletes, True)))
+ def test_apply_delete_dynamic(self):
+ provider, existing, record = self._get_dynamic_package()
+ provider._populate_traffic_managers()
+ profiles = self._get_tm_profiles(provider)
+ change = Delete(record)
+ provider._apply_Delete(change)
+ dns_delete, tm_delete = (
+ provider._dns_client.record_sets.delete,
+ provider._tm_client.profiles.delete
+ )
+ dns_delete.assert_called_once()
+ self.assertEqual(tm_delete.call_count, len(profiles))
def test_create_zone(self):
provider = self._get_provider()
@@ -501,18 +2335,19 @@ class TestAzureDnsProvider(TestCase):
_get = provider._dns_client.zones.get
_get.side_effect = CloudError(Mock(status=404), err_msg)
- self.assertEquals(18, provider.apply(Plan(None, desired, changes,
- True)))
+ expected_n = len(octo_records)
+ self.assertEquals(expected_n, provider.apply(Plan(None, desired,
+ changes, True)))
def test_check_zone_no_create(self):
provider = self._get_provider()
rs = []
- recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1')])
+ recordSet = RecordSet(a_records=[ARecord(ipv4_address='1.1.1.1')])
recordSet.name, recordSet.ttl, recordSet.type = 'a1', 0, 'A'
rs.append(recordSet)
- recordSet = RecordSet(arecords=[ARecord(ipv4_address='1.1.1.1'),
- ARecord(ipv4_address='2.2.2.2')])
+ recordSet = RecordSet(a_records=[ARecord(ipv4_address='1.1.1.1'),
+ ARecord(ipv4_address='2.2.2.2')])
recordSet.name, recordSet.ttl, recordSet.type = 'a2', 1, 'A'
rs.append(recordSet)
diff --git a/tests/test_octodns_provider_base.py b/tests/test_octodns_provider_base.py
index f33db0f..cee7c2c 100644
--- a/tests/test_octodns_provider_base.py
+++ b/tests/test_octodns_provider_base.py
@@ -6,12 +6,15 @@ from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
+from mock import MagicMock, call
from six import text_type
from unittest import TestCase
-from octodns.record import Create, Delete, Record, Update
+from octodns.processor.base import BaseProcessor
+from octodns.provider import SupportsException
from octodns.provider.base import BaseProvider
from octodns.provider.plan import Plan, UnsafePlan
+from octodns.record import Create, Delete, Record, Update
from octodns.zone import Zone
@@ -20,8 +23,9 @@ class HelperProvider(BaseProvider):
SUPPORTS = set(('A',))
id = 'test'
+ strict_supports = False
- def __init__(self, extra_changes, apply_disabled=False,
+ def __init__(self, extra_changes=[], apply_disabled=False,
include_change_callback=None):
self.__extra_changes = extra_changes
self.apply_disabled = apply_disabled
@@ -43,6 +47,29 @@ class HelperProvider(BaseProvider):
pass
+class TrickyProcessor(BaseProcessor):
+
+ def __init__(self, name, add_during_process_target_zone):
+ super(TrickyProcessor, self).__init__(name)
+ self.add_during_process_target_zone = add_during_process_target_zone
+ self.reset()
+
+ def reset(self):
+ self.existing = None
+ self.target = None
+
+ def process_target_zone(self, existing, target):
+ self.existing = existing
+ self.target = target
+
+ new = existing.copy()
+ for record in existing.records:
+ new.add_record(record, replace=True)
+ for record in self.add_during_process_target_zone:
+ new.add_record(record, replace=True)
+ return new
+
+
class TestBaseProvider(TestCase):
def test_base_provider(self):
@@ -138,6 +165,45 @@ class TestBaseProvider(TestCase):
self.assertTrue(plan)
self.assertEquals(1, len(plan.changes))
+ def test_plan_with_processors(self):
+ zone = Zone('unit.tests.', [])
+
+ record = Record.new(zone, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ provider = HelperProvider()
+ # Processor that adds a record to the zone, which planning will then
+ # delete since it won't know anything about it
+ tricky = TrickyProcessor('tricky', [record])
+ plan = provider.plan(zone, processors=[tricky])
+ self.assertTrue(plan)
+ self.assertEquals(1, len(plan.changes))
+ self.assertIsInstance(plan.changes[0], Delete)
+ # Called processor stored its params
+ self.assertTrue(tricky.existing)
+ self.assertEquals(zone.name, tricky.existing.name)
+
+ # Chain of processors happen one after the other
+ other = Record.new(zone, 'b', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '5.6.7.8',
+ })
+ # Another processor will add its record, thus 2 deletes
+ another = TrickyProcessor('tricky', [other])
+ plan = provider.plan(zone, processors=[tricky, another])
+ self.assertTrue(plan)
+ self.assertEquals(2, len(plan.changes))
+ self.assertIsInstance(plan.changes[0], Delete)
+ self.assertIsInstance(plan.changes[1], Delete)
+ # 2nd processor stored its params, and we'll see the record the
+ # first one added
+ self.assertTrue(another.existing)
+ self.assertEquals(zone.name, another.existing.name)
+ self.assertEquals(1, len(another.existing.records))
+
def test_apply(self):
ignored = Zone('unit.tests.', [])
@@ -167,6 +233,20 @@ class TestBaseProvider(TestCase):
# We filtered out the only change
self.assertFalse(plan)
+ def test_process_desired_zone(self):
+ zone1 = Zone('unit.tests.', [])
+ record1 = Record.new(zone1, 'ptr', {
+ 'type': 'PTR',
+ 'ttl': 3600,
+ 'values': ['foo.com.', 'bar.com.'],
+ })
+ zone1.add_record(record1)
+
+ zone2 = HelperProvider('hasptr')._process_desired_zone(zone1)
+ record2 = list(zone2.records)[0]
+
+ self.assertEqual(len(record2.values), 1)
+
def test_safe_none(self):
# No changes is safe
Plan(None, None, [], True).raise_if_unsafe()
@@ -366,3 +446,27 @@ class TestBaseProvider(TestCase):
delete_pcent_threshold=safe_pcent).raise_if_unsafe()
self.assertTrue('Too many deletes' in text_type(ctx.exception))
+
+ def test_supports_warn_or_except(self):
+ class MinimalProvider(BaseProvider):
+ SUPPORTS = set()
+ SUPPORTS_GEO = False
+
+ def __init__(self, **kwargs):
+ self.log = MagicMock()
+ super(MinimalProvider, self).__init__('minimal', **kwargs)
+
+ normal = MinimalProvider(strict_supports=False)
+ # Should log a warning and not raise
+ normal.supports_warn_or_except('Hello World!', 'Goodbye')
+ normal.log.warning.assert_called_once()
+ normal.log.warning.assert_has_calls([
+ call('Hello World!; Goodbye')
+ ])
+
+ strict = MinimalProvider(strict_supports=True)
+ # Should raise SupportsException and not log
+ with self.assertRaises(SupportsException) as ctx:
+ strict.supports_warn_or_except('Hello World!', 'Will not see')
+ self.assertEquals('minimal: Hello World!', text_type(ctx.exception))
+ strict.log.warning.assert_not_called()
diff --git a/tests/test_octodns_provider_cloudflare.py b/tests/test_octodns_provider_cloudflare.py
index 735d95c..2cc11cb 100644
--- a/tests/test_octodns_provider_cloudflare.py
+++ b/tests/test_octodns_provider_cloudflare.py
@@ -166,9 +166,15 @@ class TestCloudflareProvider(TestCase):
json={'result': [], 'result_info': {'count': 0,
'per_page': 0}})
+ base = '{}/234234243423aaabb334342aaa343435'.format(base)
+
+ # pagerules/URLFWD
+ with open('tests/fixtures/cloudflare-pagerules.json') as fh:
+ mock.get('{}/pagerules?status=active'.format(base),
+ status_code=200, text=fh.read())
+
# records
- base = '{}/234234243423aaabb334342aaa343435/dns_records' \
- .format(base)
+ base = '{}/dns_records'.format(base)
with open('tests/fixtures/cloudflare-dns_records-'
'page-1.json') as fh:
mock.get('{}?page=1'.format(base), status_code=200,
@@ -177,19 +183,23 @@ class TestCloudflareProvider(TestCase):
'page-2.json') as fh:
mock.get('{}?page=2'.format(base), status_code=200,
text=fh.read())
+ with open('tests/fixtures/cloudflare-dns_records-'
+ 'page-3.json') as fh:
+ mock.get('{}?page=3'.format(base), status_code=200,
+ text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(13, len(zone.records))
+ self.assertEquals(19, len(zone.records))
changes = self.expected.changes(zone, provider)
- self.assertEquals(0, len(changes))
+ self.assertEquals(4, len(changes))
# re-populating the same zone/records comes out of cache, no calls
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(13, len(again.records))
+ self.assertEquals(19, len(again.records))
def test_apply(self):
provider = CloudflareProvider('test', 'email', 'token', retry_period=0)
@@ -203,12 +213,12 @@ class TestCloudflareProvider(TestCase):
'id': 42,
}
}, # zone create
- ] + [None] * 22 # individual record creates
+ ] + [None] * 27 # individual record creates
# non-existent zone, create everything
plan = provider.plan(self.expected)
- self.assertEquals(13, len(plan.changes))
- self.assertEquals(13, provider.apply(plan))
+ self.assertEquals(17, len(plan.changes))
+ self.assertEquals(17, provider.apply(plan))
self.assertFalse(plan.exists)
provider._request.assert_has_calls([
@@ -232,9 +242,31 @@ class TestCloudflareProvider(TestCase):
'name': 'txt.unit.tests',
'ttl': 600
}),
+ # create at least one pagerules
+ call('POST', '/zones/42/pagerules', data={
+ 'targets': [
+ {
+ 'target': 'url',
+ 'constraint': {
+ 'operator': 'matches',
+ 'value': 'urlfwd.unit.tests/'
+ }
+ }
+ ],
+ 'actions': [
+ {
+ 'id': 'forwarding_url',
+ 'value': {
+ 'url': 'http://www.unit.tests',
+ 'status_code': 302
+ }
+ }
+ ],
+ 'status': 'active'
+ }),
], True)
# expected number of total calls
- self.assertEquals(23, provider._request.call_count)
+ self.assertEquals(29, provider._request.call_count)
provider._request.reset_mock()
@@ -307,6 +339,56 @@ class TestCloudflareProvider(TestCase):
"auto_added": False
}
},
+ {
+ "id": "2a9140b17ffb0e6aed826049eec970b7",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd.unit.tests/"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://www.unit.tests",
+ "status_code": 302
+ }
+ }
+ ],
+ "priority": 1,
+ "status": "active",
+ "created_on": "2021-06-25T20:10:50.000000Z",
+ "modified_on": "2021-06-28T22:38:10.000000Z"
+ },
+ {
+ "id": "2a9141b18ffb0e6aed826050eec970b8",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwdother.unit.tests/target"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://target.unit.tests",
+ "status_code": 301
+ }
+ }
+ ],
+ "priority": 2,
+ "status": "active",
+ "created_on": "2021-06-25T20:10:50.000000Z",
+ "modified_on": "2021-06-28T22:38:10.000000Z"
+ },
])
# we don't care about the POST/create return values
@@ -315,7 +397,7 @@ class TestCloudflareProvider(TestCase):
# Test out the create rate-limit handling, then 9 successes
provider._request.side_effect = [
CloudflareRateLimitError('{}'),
- ] + ([None] * 3)
+ ] + ([None] * 5)
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'nc', {
@@ -328,14 +410,31 @@ class TestCloudflareProvider(TestCase):
'type': 'A',
'value': '3.2.3.4'
}))
+ wanted.add_record(Record.new(wanted, 'urlfwd', {
+ 'ttl': 300,
+ 'type': 'URLFWD',
+ 'value': {
+ 'path': '/*', # path change
+ 'target': 'https://www.unit.tests/', # target change
+ 'code': 301, # status_code change
+ 'masking': '2',
+ 'query': 0,
+ }
+ }))
plan = provider.plan(wanted)
# only see the delete & ttl update, below min-ttl is filtered out
- self.assertEquals(2, len(plan.changes))
- self.assertEquals(2, provider.apply(plan))
+ self.assertEquals(4, len(plan.changes))
+ self.assertEquals(4, provider.apply(plan))
self.assertTrue(plan.exists)
# creates a the new value and then deletes all the old
provider._request.assert_has_calls([
+ call('DELETE', '/zones/42/'
+ 'pagerules/2a9141b18ffb0e6aed826050eec970b8'),
+ call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
+ 'dns_records/fc12ab34cd5611334422ab3322997653'),
+ call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
+ 'dns_records/fc12ab34cd5611334422ab3322997654'),
call('PUT', '/zones/42/dns_records/'
'fc12ab34cd5611334422ab3322997655', data={
'content': '3.2.3.4',
@@ -344,10 +443,28 @@ class TestCloudflareProvider(TestCase):
'proxied': False,
'ttl': 300
}),
- call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
- 'dns_records/fc12ab34cd5611334422ab3322997653'),
- call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
- 'dns_records/fc12ab34cd5611334422ab3322997654')
+ call('PUT', '/zones/42/pagerules/'
+ '2a9140b17ffb0e6aed826049eec970b7', data={
+ 'targets': [
+ {
+ 'target': 'url',
+ 'constraint': {
+ 'operator': 'matches',
+ 'value': 'urlfwd.unit.tests/*'
+ }
+ }
+ ],
+ 'actions': [
+ {
+ 'id': 'forwarding_url',
+ 'value': {
+ 'url': 'https://www.unit.tests/',
+ 'status_code': 301
+ }
+ }
+ ],
+ 'status': 'active',
+ }),
])
def test_update_add_swap(self):
@@ -496,6 +613,56 @@ class TestCloudflareProvider(TestCase):
"auto_added": False
}
},
+ {
+ "id": "2a9140b17ffb0e6aed826049eec974b7",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd1.unit.tests/"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://www.unit.tests",
+ "status_code": 302
+ }
+ }
+ ],
+ "priority": 1,
+ "status": "active",
+ "created_on": "2021-06-25T20:10:50.000000Z",
+ "modified_on": "2021-06-28T22:38:10.000000Z"
+ },
+ {
+ "id": "2a9141b18ffb0e6aed826054eec970b8",
+ "targets": [
+ {
+ "target": "url",
+ "constraint": {
+ "operator": "matches",
+ "value": "urlfwd1.unit.tests/target"
+ }
+ }
+ ],
+ "actions": [
+ {
+ "id": "forwarding_url",
+ "value": {
+ "url": "https://target.unit.tests",
+ "status_code": 301
+ }
+ }
+ ],
+ "priority": 2,
+ "status": "active",
+ "created_on": "2021-06-25T20:10:50.000000Z",
+ "modified_on": "2021-06-28T22:38:10.000000Z"
+ },
])
provider._request = Mock()
@@ -509,6 +676,8 @@ class TestCloudflareProvider(TestCase):
}, # zone create
None,
None,
+ None,
+ None,
]
# Add something and delete something
@@ -519,14 +688,46 @@ class TestCloudflareProvider(TestCase):
# This matches the zone data above, one to delete, one to leave
'values': ['ns1.foo.bar.', 'ns2.foo.bar.'],
})
+ exstingurlfwd = Record.new(zone, 'urlfwd1', {
+ 'ttl': 300,
+ 'type': 'URLFWD',
+ 'values': [
+ {
+ 'path': '/',
+ 'target': 'https://www.unit.tests',
+ 'code': 302,
+ 'masking': '2',
+ 'query': 0,
+ },
+ {
+ 'path': '/target',
+ 'target': 'https://target.unit.tests',
+ 'code': 301,
+ 'masking': '2',
+ 'query': 0,
+ }
+ ]
+ })
new = Record.new(zone, '', {
'ttl': 300,
'type': 'NS',
# This leaves one and deletes one
'value': 'ns2.foo.bar.',
})
+ newurlfwd = Record.new(zone, 'urlfwd1', {
+ 'ttl': 300,
+ 'type': 'URLFWD',
+ 'value': {
+ 'path': '/',
+ 'target': 'https://www.unit.tests',
+ 'code': 302,
+ 'masking': '2',
+ 'query': 0,
+ }
+ })
change = Update(existing, new)
- plan = Plan(zone, zone, [change], True)
+ changeurlfwd = Update(exstingurlfwd, newurlfwd)
+ plan = Plan(zone, zone, [change, changeurlfwd], True)
provider._apply(plan)
# Get zones, create zone, create a record, delete a record
@@ -544,7 +745,31 @@ class TestCloudflareProvider(TestCase):
'ttl': 300
}),
call('DELETE', '/zones/42/dns_records/'
- 'fc12ab34cd5611334422ab3322997653')
+ 'fc12ab34cd5611334422ab3322997653'),
+ call('PUT', '/zones/42/pagerules/'
+ '2a9140b17ffb0e6aed826049eec974b7', data={
+ 'targets': [
+ {
+ 'target': 'url',
+ 'constraint': {
+ 'operator': 'matches',
+ 'value': 'urlfwd1.unit.tests/'
+ }
+ }
+ ],
+ 'actions': [
+ {
+ 'id': 'forwarding_url',
+ 'value': {
+ 'url': 'https://www.unit.tests',
+ 'status_code': 302
+ }
+ }
+ ],
+ 'status': 'active'
+ }),
+ call('DELETE', '/zones/42/pagerules/'
+ '2a9141b18ffb0e6aed826054eec970b8'),
])
def test_ptr(self):
@@ -566,6 +791,52 @@ class TestCloudflareProvider(TestCase):
'content': 'foo.bar.com.'
}, list(ptr_record_contents)[0])
+ def test_loc(self):
+ self.maxDiff = None
+ provider = CloudflareProvider('test', 'email', 'token')
+
+ zone = Zone('unit.tests.', [])
+ # LOC record
+ loc_record = Record.new(zone, 'example', {
+ 'ttl': 300,
+ 'type': 'LOC',
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ loc_record_contents = provider._gen_data(loc_record)
+ self.assertEquals({
+ 'name': 'example.unit.tests',
+ 'ttl': 300,
+ 'type': 'LOC',
+ 'data': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ }, list(loc_record_contents)[0])
+
def test_srv(self):
provider = CloudflareProvider('test', 'email', 'token')
@@ -697,6 +968,23 @@ class TestCloudflareProvider(TestCase):
},
'type': 'SRV',
}),
+ ('31 58 52.1 S 115 49 11.7 E 20 10 10 2', {
+ 'data': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ },
+ 'type': 'LOC',
+ }),
):
self.assertEqual(expected, provider._gen_key(data))
@@ -1343,3 +1631,11 @@ class TestCloudflareProvider(TestCase):
with self.assertRaises(CloudflareRateLimitError) as ctx:
provider.zone_records(zone)
self.assertEquals('last', text_type(ctx.exception))
+
+ def test_ttl_mapping(self):
+ provider = CloudflareProvider('test', 'email', 'token')
+
+ self.assertEquals(120, provider._ttl_data(120))
+ self.assertEquals(120, provider._ttl_data(120))
+ self.assertEquals(3600, provider._ttl_data(3600))
+ self.assertEquals(300, provider._ttl_data(1))
diff --git a/tests/test_octodns_provider_constellix.py b/tests/test_octodns_provider_constellix.py
index 1ca3179..d15f611 100644
--- a/tests/test_octodns_provider_constellix.py
+++ b/tests/test_octodns_provider_constellix.py
@@ -172,7 +172,7 @@ class TestConstellixProvider(TestCase):
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded, no unsupported
- n = len(self.expected.records) - 5
+ n = len(self.expected.records) - 8
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
diff --git a/tests/test_octodns_provider_digitalocean.py b/tests/test_octodns_provider_digitalocean.py
index ebb5319..9ed54bf 100644
--- a/tests/test_octodns_provider_digitalocean.py
+++ b/tests/test_octodns_provider_digitalocean.py
@@ -83,14 +83,14 @@ class TestDigitalOceanProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(12, len(zone.records))
+ self.assertEquals(14, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(12, len(again.records))
+ self.assertEquals(14, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
@@ -163,7 +163,7 @@ class TestDigitalOceanProvider(TestCase):
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded, no unsupported
- n = len(self.expected.records) - 7
+ n = len(self.expected.records) - 10
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
self.assertFalse(plan.exists)
@@ -190,6 +190,24 @@ class TestDigitalOceanProvider(TestCase):
'flags': 0, 'name': '@',
'tag': 'issue',
'ttl': 3600, 'type': 'CAA'}),
+ call('POST', '/domains/unit.tests/records', data={
+ 'name': '_imap._tcp',
+ 'weight': 0,
+ 'data': '.',
+ 'priority': 0,
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'port': 0
+ }),
+ call('POST', '/domains/unit.tests/records', data={
+ 'name': '_pop3._tcp',
+ 'weight': 0,
+ 'data': '.',
+ 'priority': 0,
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'port': 0
+ }),
call('POST', '/domains/unit.tests/records', data={
'name': '_srv._tcp',
'weight': 20,
@@ -200,7 +218,7 @@ class TestDigitalOceanProvider(TestCase):
'port': 30
}),
])
- self.assertEquals(24, provider._client._request.call_count)
+ self.assertEquals(26, provider._client._request.call_count)
provider._client._request.reset_mock()
diff --git a/tests/test_octodns_provider_dnsimple.py b/tests/test_octodns_provider_dnsimple.py
index b918962..0b8d209 100644
--- a/tests/test_octodns_provider_dnsimple.py
+++ b/tests/test_octodns_provider_dnsimple.py
@@ -137,7 +137,7 @@ class TestDnsimpleProvider(TestCase):
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded
- n = len(self.expected.records) - 3
+ n = len(self.expected.records) - 8
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
self.assertFalse(plan.exists)
diff --git a/tests/test_octodns_provider_dnsmadeeasy.py b/tests/test_octodns_provider_dnsmadeeasy.py
index ba61b94..9efc81d 100644
--- a/tests/test_octodns_provider_dnsmadeeasy.py
+++ b/tests/test_octodns_provider_dnsmadeeasy.py
@@ -44,12 +44,6 @@ class TestDnsMadeEasyProvider(TestCase):
'value': 'aname.unit.tests.'
}))
- expected.add_record(Record.new(expected, 'sub', {
- 'ttl': 1800,
- 'type': 'ALIAS',
- 'value': 'aname.unit.tests.'
- }))
-
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected._remove_record(record)
@@ -108,14 +102,14 @@ class TestDnsMadeEasyProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(15, len(zone.records))
+ self.assertEquals(14, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(15, len(again.records))
+ self.assertEquals(14, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
@@ -140,7 +134,7 @@ class TestDnsMadeEasyProvider(TestCase):
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded, no unsupported
- n = len(self.expected.records) - 5
+ n = len(self.expected.records) - 10
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
@@ -180,7 +174,7 @@ class TestDnsMadeEasyProvider(TestCase):
'port': 30
}),
])
- self.assertEquals(27, provider._client._request.call_count)
+ self.assertEquals(26, provider._client._request.call_count)
provider._client._request.reset_mock()
diff --git a/tests/test_octodns_provider_easydns.py b/tests/test_octodns_provider_easydns.py
index 2681bf4..85492eb 100644
--- a/tests/test_octodns_provider_easydns.py
+++ b/tests/test_octodns_provider_easydns.py
@@ -80,14 +80,14 @@ class TestEasyDNSProvider(TestCase):
text=fh.read())
provider.populate(zone)
- self.assertEquals(13, len(zone.records))
+ self.assertEquals(15, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(13, len(again.records))
+ self.assertEquals(15, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
@@ -374,12 +374,12 @@ class TestEasyDNSProvider(TestCase):
plan = provider.plan(self.expected)
# No root NS, no ignored, no excluded, no unsupported
- n = len(self.expected.records) - 6
+ n = len(self.expected.records) - 9
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
self.assertFalse(plan.exists)
- self.assertEquals(23, provider._client._request.call_count)
+ self.assertEquals(25, provider._client._request.call_count)
provider._client._request.reset_mock()
diff --git a/tests/test_octodns_provider_edgedns.py b/tests/test_octodns_provider_edgedns.py
index 20a9a07..694c762 100644
--- a/tests/test_octodns_provider_edgedns.py
+++ b/tests/test_octodns_provider_edgedns.py
@@ -77,14 +77,14 @@ class TestEdgeDnsProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(16, len(zone.records))
+ self.assertEquals(18, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
- self.assertEquals(16, len(again.records))
+ self.assertEquals(18, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
@@ -105,7 +105,7 @@ class TestEdgeDnsProvider(TestCase):
mock.delete(ANY, status_code=204)
changes = provider.apply(plan)
- self.assertEquals(29, changes)
+ self.assertEquals(31, changes)
# Test against a zone that doesn't exist yet
with requests_mock() as mock:
@@ -118,7 +118,7 @@ class TestEdgeDnsProvider(TestCase):
mock.delete(ANY, status_code=204)
changes = provider.apply(plan)
- self.assertEquals(14, changes)
+ self.assertEquals(16, changes)
# Test against a zone that doesn't exist yet, but gid not provided
with requests_mock() as mock:
@@ -132,7 +132,7 @@ class TestEdgeDnsProvider(TestCase):
mock.delete(ANY, status_code=204)
changes = provider.apply(plan)
- self.assertEquals(14, changes)
+ self.assertEquals(16, changes)
# Test against a zone that doesn't exist, but cid not provided
diff --git a/tests/test_octodns_provider_gandi.py b/tests/test_octodns_provider_gandi.py
new file mode 100644
index 0000000..f2e3028
--- /dev/null
+++ b/tests/test_octodns_provider_gandi.py
@@ -0,0 +1,377 @@
+#
+#
+#
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from mock import Mock, call
+from os.path import dirname, join
+from requests import HTTPError
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.record import Record
+from octodns.provider.gandi import GandiProvider, GandiClientBadRequest, \
+ GandiClientUnauthorized, GandiClientForbidden, GandiClientNotFound, \
+ GandiClientUnknownDomainName
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+
+
+class TestGandiProvider(TestCase):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test', join(dirname(__file__), 'config'))
+ source.populate(expected)
+
+ # We remove this record from the test zone as Gandi API reject it
+ # (rightfully).
+ expected._remove_record(Record.new(expected, 'sub', {
+ 'ttl': 1800,
+ 'type': 'NS',
+ 'values': [
+ '6.2.3.4.',
+ '7.2.3.4.'
+ ]
+ }))
+
+ def test_populate(self):
+
+ provider = GandiProvider('test_id', 'token')
+
+ # 400 - Bad Request.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=400,
+ text='{"status": "error", "errors": [{"location": '
+ '"body", "name": "items", "description": '
+ '"\'6.2.3.4.\': invalid hostname (param: '
+ '{\'rrset_type\': u\'NS\', \'rrset_ttl\': 3600, '
+ '\'rrset_name\': u\'sub\', \'rrset_values\': '
+ '[u\'6.2.3.4.\', u\'7.2.3.4.\']})"}, {"location": '
+ '"body", "name": "items", "description": '
+ '"\'7.2.3.4.\': invalid hostname (param: '
+ '{\'rrset_type\': u\'NS\', \'rrset_ttl\': 3600, '
+ '\'rrset_name\': u\'sub\', \'rrset_values\': '
+ '[u\'6.2.3.4.\', u\'7.2.3.4.\']})"}]}')
+
+ with self.assertRaises(GandiClientBadRequest) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertIn('"status": "error"', text_type(ctx.exception))
+
+ # 401 - Unauthorized.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=401,
+ text='{"code":401,"message":"The server could not verify '
+ 'that you authorized to access the document you '
+ 'requested. Either you supplied the wrong '
+ 'credentials (e.g., bad api key), or your access '
+ 'token has expired","object":"HTTPUnauthorized",'
+ '"cause":"Unauthorized"}')
+
+ with self.assertRaises(GandiClientUnauthorized) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertIn('"cause":"Unauthorized"', text_type(ctx.exception))
+
+ # 403 - Forbidden.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=403,
+ text='{"code":403,"message":"Access was denied to this '
+ 'resource.","object":"HTTPForbidden","cause":'
+ '"Forbidden"}')
+
+ with self.assertRaises(GandiClientForbidden) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertIn('"cause":"Forbidden"', text_type(ctx.exception))
+
+ # 404 - Not Found.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='{"code": 404, "message": "The resource could not '
+ 'be found.", "object": "HTTPNotFound", "cause": '
+ '"Not Found"}')
+
+ with self.assertRaises(GandiClientNotFound) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider._client.zone(zone)
+ self.assertIn('"cause": "Not Found"', text_type(ctx.exception))
+
+ # General error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=502, text='Things caught fire')
+
+ with self.assertRaises(HTTPError) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(502, ctx.exception.response.status_code)
+
+ # No diffs == no changes
+ with requests_mock() as mock:
+ base = 'https://api.gandi.net/v5/livedns/domains/unit.tests' \
+ '/records'
+ with open('tests/fixtures/gandi-no-changes.json') as fh:
+ mock.get(base, text=fh.read())
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(16, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(0, len(changes))
+
+ del provider._zone_records[zone.name]
+
+ # Default Gandi zone file.
+ with requests_mock() as mock:
+ base = 'https://api.gandi.net/v5/livedns/domains/unit.tests' \
+ '/records'
+ with open('tests/fixtures/gandi-records.json') as fh:
+ mock.get(base, text=fh.read())
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(11, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(24, len(changes))
+
+ # 2nd populate makes no network calls/all from cache
+ again = Zone('unit.tests.', [])
+ provider.populate(again)
+ self.assertEquals(11, len(again.records))
+
+ # bust the cache
+ del provider._zone_records[zone.name]
+
+ def test_apply(self):
+ provider = GandiProvider('test_id', 'token')
+
+ # Zone does not exists but can be created.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='{"code": 404, "message": "The resource could not '
+ 'be found.", "object": "HTTPNotFound", "cause": '
+ '"Not Found"}')
+ mock.post(ANY, status_code=201,
+ text='{"message": "Domain Created"}')
+
+ plan = provider.plan(self.expected)
+ provider.apply(plan)
+
+ # Zone does not exists and can't be created.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='{"code": 404, "message": "The resource could not '
+ 'be found.", "object": "HTTPNotFound", "cause": '
+ '"Not Found"}')
+ mock.post(ANY, status_code=404,
+ text='{"code": 404, "message": "The resource could not '
+ 'be found.", "object": "HTTPNotFound", "cause": '
+ '"Not Found"}')
+
+ with self.assertRaises((GandiClientNotFound,
+ GandiClientUnknownDomainName)) as ctx:
+ plan = provider.plan(self.expected)
+ provider.apply(plan)
+ self.assertIn('This domain is not registered at Gandi.',
+ text_type(ctx.exception))
+
+ resp = Mock()
+ resp.json = Mock()
+ provider._client._request = Mock(return_value=resp)
+
+ with open('tests/fixtures/gandi-zone.json') as fh:
+ zone = fh.read()
+
+ # non-existent domain
+ resp.json.side_effect = [
+ GandiClientNotFound(resp), # no zone in populate
+ GandiClientNotFound(resp), # no domain during apply
+ zone
+ ]
+ plan = provider.plan(self.expected)
+
+ # No root NS, no ignored, no excluded, no LOC
+ n = len(self.expected.records) - 6
+ self.assertEquals(n, len(plan.changes))
+ self.assertEquals(n, provider.apply(plan))
+ self.assertFalse(plan.exists)
+
+ provider._client._request.assert_has_calls([
+ call('GET', '/livedns/domains/unit.tests/records'),
+ call('GET', '/livedns/domains/unit.tests'),
+ call('POST', '/livedns/domains', data={
+ 'fqdn': 'unit.tests',
+ 'zone': {}
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'www.sub',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['2.2.3.6']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'www',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['2.2.3.6']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'txt',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'TXT',
+ 'rrset_values': [
+ 'Bah bah black sheep',
+ 'have you any wool.',
+ 'v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string'
+ '+with+numb3rs'
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'spf',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'SPF',
+ 'rrset_values': ['v=spf1 ip4:192.168.0.1/16-all']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'ptr',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'PTR',
+ 'rrset_values': ['foo.bar.com.']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'mx',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'MX',
+ 'rrset_values': [
+ '10 smtp-4.unit.tests.',
+ '20 smtp-2.unit.tests.',
+ '30 smtp-3.unit.tests.',
+ '40 smtp-1.unit.tests.'
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'excluded',
+ 'rrset_ttl': 3600,
+ 'rrset_type': 'CNAME',
+ 'rrset_values': ['unit.tests.']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'dname',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'DNAME',
+ 'rrset_values': ['unit.tests.']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'cname',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'CNAME',
+ 'rrset_values': ['unit.tests.']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'aaaa',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'AAAA',
+ 'rrset_values': ['2601:644:500:e210:62f8:1dff:feb8:947a']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '_srv._tcp',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'SRV',
+ 'rrset_values': [
+ '10 20 30 foo-1.unit.tests.',
+ '12 20 30 foo-2.unit.tests.'
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '_pop3._tcp',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'SRV',
+ 'rrset_values': [
+ '0 0 0 .',
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '_imap._tcp',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'SRV',
+ 'rrset_values': [
+ '0 0 0 .',
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '@',
+ 'rrset_ttl': 3600,
+ 'rrset_type': 'SSHFP',
+ 'rrset_values': [
+ '1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49',
+ '1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73'
+ ]
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '@',
+ 'rrset_ttl': 3600,
+ 'rrset_type': 'CAA',
+ 'rrset_values': ['0 issue "ca.unit.tests"']
+ }),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': '@',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['1.2.3.4', '1.2.3.5']
+ })
+ ])
+ # expected number of total calls
+ self.assertEquals(19, provider._client._request.call_count)
+
+ provider._client._request.reset_mock()
+
+ # delete 1 and update 1
+ provider._client.zone_records = Mock(return_value=[
+ {
+ 'rrset_name': 'www',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['1.2.3.4']
+ },
+ {
+ 'rrset_name': 'www',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['2.2.3.4']
+ },
+ {
+ 'rrset_name': 'ttl',
+ 'rrset_ttl': 600,
+ 'rrset_type': 'A',
+ 'rrset_values': ['3.2.3.4']
+ }
+ ])
+
+ # Domain exists, we don't care about return
+ resp.json.side_effect = ['{}']
+
+ wanted = Zone('unit.tests.', [])
+ wanted.add_record(Record.new(wanted, 'ttl', {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '3.2.3.4'
+ }))
+
+ plan = provider.plan(wanted)
+ self.assertTrue(plan.exists)
+ self.assertEquals(2, len(plan.changes))
+ self.assertEquals(2, provider.apply(plan))
+
+ # recreate for update, and deletes for the 2 parts of the other
+ provider._client._request.assert_has_calls([
+ call('DELETE', '/livedns/domains/unit.tests/records/www/A'),
+ call('DELETE', '/livedns/domains/unit.tests/records/ttl/A'),
+ call('POST', '/livedns/domains/unit.tests/records', data={
+ 'rrset_name': 'ttl',
+ 'rrset_ttl': 300,
+ 'rrset_type': 'A',
+ 'rrset_values': ['3.2.3.4']
+ })
+ ], any_order=True)
diff --git a/tests/test_octodns_provider_gcore.py b/tests/test_octodns_provider_gcore.py
new file mode 100644
index 0000000..2151440
--- /dev/null
+++ b/tests/test_octodns_provider_gcore.py
@@ -0,0 +1,672 @@
+#
+#
+#
+
+from __future__ import (
+ absolute_import,
+ division,
+ print_function,
+ unicode_literals,
+)
+
+from mock import Mock, call
+from os.path import dirname, join
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.record import Record, Update, Delete, Create
+from octodns.provider.gcore import (
+ GCoreProvider,
+ GCoreClientBadRequest,
+ GCoreClientNotFound,
+ GCoreClientException,
+)
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+
+
+class TestGCoreProvider(TestCase):
+ expected = Zone("unit.tests.", [])
+ source = YamlProvider("test", join(dirname(__file__), "config"))
+ source.populate(expected)
+
+ default_filters = [
+ {"type": "geodns"},
+ {
+ "type": "default",
+ "limit": 1,
+ "strict": False,
+ },
+ {"type": "first_n", "limit": 1},
+ ]
+
+ def test_populate(self):
+
+ provider = GCoreProvider("test_id", token="token")
+
+ # TC: 400 - Bad Request.
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=400, text='{"error":"bad body"}')
+
+ with self.assertRaises(GCoreClientBadRequest) as ctx:
+ zone = Zone("unit.tests.", [])
+ provider.populate(zone)
+ self.assertIn('"error":"bad body"', text_type(ctx.exception))
+
+ # TC: 404 - Not Found.
+ with requests_mock() as mock:
+ mock.get(
+ ANY, status_code=404, text='{"error":"zone is not found"}'
+ )
+
+ with self.assertRaises(GCoreClientNotFound) as ctx:
+ zone = Zone("unit.tests.", [])
+ provider._client.zone(zone.name)
+ self.assertIn(
+ '"error":"zone is not found"', text_type(ctx.exception)
+ )
+
+ # TC: General error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=500, text="Things caught fire")
+
+ with self.assertRaises(GCoreClientException) as ctx:
+ zone = Zone("unit.tests.", [])
+ provider.populate(zone)
+ self.assertEqual("Things caught fire", text_type(ctx.exception))
+
+ # TC: No credentials or token error
+ with requests_mock() as mock:
+ with self.assertRaises(ValueError) as ctx:
+ GCoreProvider("test_id")
+ self.assertEqual(
+ "either token or login & password must be set",
+ text_type(ctx.exception),
+ )
+
+ # TC: Auth with login password
+ with requests_mock() as mock:
+
+ def match_body(request):
+ return {"username": "foo", "password": "bar"} == request.json()
+
+ auth_url = "http://api/auth/jwt/login"
+ mock.post(
+ auth_url,
+ additional_matcher=match_body,
+ status_code=200,
+ json={"access": "access"},
+ )
+
+ providerPassword = GCoreProvider(
+ "test_id",
+ url="http://dns",
+ auth_url="http://api",
+ login="foo",
+ password="bar",
+ )
+ assert mock.called
+
+ # make sure token passed in header
+ zone_rrset_url = "http://dns/zones/unit.tests/rrsets?all=true"
+ mock.get(
+ zone_rrset_url,
+ request_headers={"Authorization": "Bearer access"},
+ status_code=404,
+ )
+ zone = Zone("unit.tests.", [])
+ assert not providerPassword.populate(zone)
+
+ # TC: No diffs == no changes
+ with requests_mock() as mock:
+ base = "https://dnsapi.gcorelabs.com/v2/zones/unit.tests/rrsets"
+ with open("tests/fixtures/gcore-no-changes.json") as fh:
+ mock.get(base, text=fh.read())
+
+ zone = Zone("unit.tests.", [])
+ provider.populate(zone)
+ self.assertEqual(14, len(zone.records))
+ self.assertEqual(
+ {
+ "",
+ "_imap._tcp",
+ "_pop3._tcp",
+ "_srv._tcp",
+ "aaaa",
+ "cname",
+ "excluded",
+ "mx",
+ "ptr",
+ "sub",
+ "txt",
+ "www",
+ "www.sub",
+ },
+ {r.name for r in zone.records},
+ )
+ changes = self.expected.changes(zone, provider)
+ self.assertEqual(0, len(changes))
+
+        # TC: 3 create (dynamic) + 1 removed + 7 modified
+ with requests_mock() as mock:
+ base = "https://dnsapi.gcorelabs.com/v2/zones/unit.tests/rrsets"
+ with open("tests/fixtures/gcore-records.json") as fh:
+ mock.get(base, text=fh.read())
+
+ zone = Zone("unit.tests.", [])
+ provider.populate(zone)
+ self.assertEqual(16, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEqual(11, len(changes))
+ self.assertEqual(
+ 3, len([c for c in changes if isinstance(c, Create)])
+ )
+ self.assertEqual(
+ 1, len([c for c in changes if isinstance(c, Delete)])
+ )
+ self.assertEqual(
+ 7, len([c for c in changes if isinstance(c, Update)])
+ )
+
+ # TC: no pools can be built
+ with requests_mock() as mock:
+ base = "https://dnsapi.gcorelabs.com/v2/zones/unit.tests/rrsets"
+ mock.get(
+ base,
+ json={
+ "rrsets": [
+ {
+ "name": "unit.tests.",
+ "type": "A",
+ "ttl": 300,
+ "filters": self.default_filters,
+ "resource_records": [{"content": ["7.7.7.7"]}],
+ }
+ ]
+ },
+ )
+
+ zone = Zone("unit.tests.", [])
+ with self.assertRaises(RuntimeError) as ctx:
+ provider.populate(zone)
+
+ self.assertTrue(
+ str(ctx.exception).startswith(
+ "filter is enabled, but no pools where built for"
+ ),
+ "{} - is not start from desired text".format(ctx.exception),
+ )
+
+ def test_apply(self):
+ provider = GCoreProvider("test_id", url="http://api", token="token")
+
+ # TC: Zone does not exists but can be created.
+ with requests_mock() as mock:
+ mock.get(
+ ANY, status_code=404, text='{"error":"zone is not found"}'
+ )
+ mock.post(ANY, status_code=200, text='{"id":1234}')
+
+ plan = provider.plan(self.expected)
+ provider.apply(plan)
+
+ # TC: Zone does not exists and can't be created.
+ with requests_mock() as mock:
+ mock.get(
+ ANY, status_code=404, text='{"error":"zone is not found"}'
+ )
+ mock.post(
+ ANY,
+ status_code=400,
+ text='{"error":"parent zone is already'
+ ' occupied by another client"}',
+ )
+
+ with self.assertRaises(
+ (GCoreClientNotFound, GCoreClientBadRequest)
+ ) as ctx:
+ plan = provider.plan(self.expected)
+ provider.apply(plan)
+ self.assertIn(
+ "parent zone is already occupied by another client",
+ text_type(ctx.exception),
+ )
+
+ resp = Mock()
+ resp.json = Mock()
+ provider._client._request = Mock(return_value=resp)
+
+ with open("tests/fixtures/gcore-zone.json") as fh:
+ zone = fh.read()
+
+ # non-existent domain
+ resp.json.side_effect = [
+ GCoreClientNotFound(resp), # no zone in populate
+ GCoreClientNotFound(resp), # no domain during apply
+ zone,
+ ]
+ plan = provider.plan(self.expected)
+
+ # TC: create all
+ self.assertEqual(13, len(plan.changes))
+ self.assertEqual(13, provider.apply(plan))
+ self.assertFalse(plan.exists)
+
+ provider._client._request.assert_has_calls(
+ [
+ call(
+ "GET",
+ "http://api/zones/unit.tests/rrsets",
+ params={"all": "true"},
+ ),
+ call("GET", "http://api/zones/unit.tests"),
+ call("POST", "http://api/zones", data={"name": "unit.tests"}),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/www.sub.unit.tests./A",
+ data={
+ "ttl": 300,
+ "resource_records": [{"content": ["2.2.3.6"]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/www.unit.tests./A",
+ data={
+ "ttl": 300,
+ "resource_records": [{"content": ["2.2.3.6"]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/txt.unit.tests./TXT",
+ data={
+ "ttl": 600,
+ "resource_records": [
+ {"content": ["Bah bah black sheep"]},
+ {"content": ["have you any wool."]},
+ {
+ "content": [
+ "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+"
+ "of/long/string+with+numb3rs"
+ ]
+ },
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/sub.unit.tests./NS",
+ data={
+ "ttl": 3600,
+ "resource_records": [
+ {"content": ["6.2.3.4."]},
+ {"content": ["7.2.3.4."]},
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/ptr.unit.tests./PTR",
+ data={
+ "ttl": 300,
+ "resource_records": [
+ {"content": ["foo.bar.com."]},
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/mx.unit.tests./MX",
+ data={
+ "ttl": 300,
+ "resource_records": [
+ {"content": [10, "smtp-4.unit.tests."]},
+ {"content": [20, "smtp-2.unit.tests."]},
+ {"content": [30, "smtp-3.unit.tests."]},
+ {"content": [40, "smtp-1.unit.tests."]},
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/excluded.unit.tests./CNAME",
+ data={
+ "ttl": 3600,
+ "resource_records": [{"content": ["unit.tests."]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/cname.unit.tests./CNAME",
+ data={
+ "ttl": 300,
+ "resource_records": [{"content": ["unit.tests."]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/aaaa.unit.tests./AAAA",
+ data={
+ "ttl": 600,
+ "resource_records": [
+ {
+ "content": [
+ "2601:644:500:e210:62f8:1dff:feb8:947a"
+ ]
+ }
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/_srv._tcp.unit.tests./SRV",
+ data={
+ "ttl": 600,
+ "resource_records": [
+ {"content": [10, 20, 30, "foo-1.unit.tests."]},
+ {"content": [12, 20, 30, "foo-2.unit.tests."]},
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/_pop3._tcp.unit.tests./SRV",
+ data={
+ "ttl": 600,
+ "resource_records": [{"content": [0, 0, 0, "."]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/_imap._tcp.unit.tests./SRV",
+ data={
+ "ttl": 600,
+ "resource_records": [{"content": [0, 0, 0, "."]}],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/unit.tests./A",
+ data={
+ "ttl": 300,
+ "resource_records": [
+ {"content": ["1.2.3.4"]},
+ {"content": ["1.2.3.5"]},
+ ],
+ },
+ ),
+ ]
+ )
+ # expected number of total calls
+ self.assertEqual(16, provider._client._request.call_count)
+
+ # TC: delete 1 and update 1
+ provider._client._request.reset_mock()
+ provider._client.zone_records = Mock(
+ return_value=[
+ {
+ "name": "www",
+ "ttl": 300,
+ "type": "A",
+ "resource_records": [{"content": ["1.2.3.4"]}],
+ },
+ {
+ "name": "ttl",
+ "ttl": 600,
+ "type": "A",
+ "resource_records": [{"content": ["3.2.3.4"]}],
+ },
+ ]
+ )
+
+ # Domain exists, we don't care about return
+ resp.json.side_effect = ["{}"]
+
+ wanted = Zone("unit.tests.", [])
+ wanted.add_record(
+ Record.new(
+ wanted, "ttl", {"ttl": 300, "type": "A", "value": "3.2.3.4"}
+ )
+ )
+
+ plan = provider.plan(wanted)
+ self.assertTrue(plan.exists)
+ self.assertEqual(2, len(plan.changes))
+ self.assertEqual(2, provider.apply(plan))
+
+ provider._client._request.assert_has_calls(
+ [
+ call(
+ "DELETE", "http://api/zones/unit.tests/www.unit.tests./A"
+ ),
+ call(
+ "PUT",
+ "http://api/zones/unit.tests/ttl.unit.tests./A",
+ data={
+ "ttl": 300,
+ "resource_records": [{"content": ["3.2.3.4"]}],
+ },
+ ),
+ ]
+ )
+
+ # TC: create dynamics
+ provider._client._request.reset_mock()
+ provider._client.zone_records = Mock(return_value=[])
+
+ # Domain exists, we don't care about return
+ resp.json.side_effect = ["{}"]
+
+ wanted = Zone("unit.tests.", [])
+ wanted.add_record(
+ Record.new(
+ wanted,
+ "geo-simple",
+ {
+ "ttl": 300,
+ "type": "A",
+ "value": "3.3.3.3",
+ "dynamic": {
+ "pools": {
+ "pool-1": {
+ "fallback": "other",
+ "values": [
+ {"value": "1.1.1.1"},
+ {"value": "1.1.1.2"},
+ ],
+ },
+ "pool-2": {
+ "fallback": "other",
+ "values": [
+ {"value": "2.2.2.1"},
+ ],
+ },
+ "other": {"values": [{"value": "3.3.3.3"}]},
+ },
+ "rules": [
+ {"pool": "pool-1", "geos": ["EU-RU"]},
+ {"pool": "pool-2", "geos": ["EU"]},
+ {"pool": "other"},
+ ],
+ },
+ },
+ ),
+ )
+ wanted.add_record(
+ Record.new(
+ wanted,
+ "geo-defaults",
+ {
+ "ttl": 300,
+ "type": "A",
+ "value": "3.2.3.4",
+ "dynamic": {
+ "pools": {
+ "pool-1": {
+ "values": [
+ {"value": "2.2.2.1"},
+ ],
+ },
+ },
+ "rules": [
+ {"pool": "pool-1", "geos": ["EU"]},
+ ],
+ },
+ },
+ ),
+ )
+ wanted.add_record(
+ Record.new(
+ wanted,
+ "cname-smpl",
+ {
+ "ttl": 300,
+ "type": "CNAME",
+ "value": "en.unit.tests.",
+ "dynamic": {
+ "pools": {
+ "pool-1": {
+ "fallback": "other",
+ "values": [
+ {"value": "ru-1.unit.tests."},
+ {"value": "ru-2.unit.tests."},
+ ],
+ },
+ "pool-2": {
+ "fallback": "other",
+ "values": [
+ {"value": "eu.unit.tests."},
+ ],
+ },
+ "other": {"values": [{"value": "en.unit.tests."}]},
+ },
+ "rules": [
+ {"pool": "pool-1", "geos": ["EU-RU"]},
+ {"pool": "pool-2", "geos": ["EU"]},
+ {"pool": "other"},
+ ],
+ },
+ },
+ ),
+ )
+ wanted.add_record(
+ Record.new(
+ wanted,
+ "cname-dflt",
+ {
+ "ttl": 300,
+ "type": "CNAME",
+ "value": "en.unit.tests.",
+ "dynamic": {
+ "pools": {
+ "pool-1": {
+ "values": [
+ {"value": "eu.unit.tests."},
+ ],
+ },
+ },
+ "rules": [
+ {"pool": "pool-1", "geos": ["EU"]},
+ ],
+ },
+ },
+ ),
+ )
+
+ plan = provider.plan(wanted)
+ self.assertTrue(plan.exists)
+ self.assertEqual(4, len(plan.changes))
+ self.assertEqual(4, provider.apply(plan))
+
+ provider._client._request.assert_has_calls(
+ [
+ call(
+ "POST",
+ "http://api/zones/unit.tests/geo-simple.unit.tests./A",
+ data={
+ "ttl": 300,
+ "filters": self.default_filters,
+ "resource_records": [
+ {
+ "content": ["1.1.1.1"],
+ "meta": {"countries": ["RU"]},
+ },
+ {
+ "content": ["1.1.1.2"],
+ "meta": {"countries": ["RU"]},
+ },
+ {
+ "content": ["2.2.2.1"],
+ "meta": {"continents": ["EU"]},
+ },
+ {
+ "content": ["3.3.3.3"],
+ "meta": {"default": True},
+ },
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/geo-defaults.unit.tests./A",
+ data={
+ "ttl": 300,
+ "filters": self.default_filters,
+ "resource_records": [
+ {
+ "content": ["2.2.2.1"],
+ "meta": {"continents": ["EU"]},
+ },
+ {
+ "content": ["3.2.3.4"],
+ },
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/cname-smpl.unit.tests./CNAME",
+ data={
+ "ttl": 300,
+ "filters": self.default_filters,
+ "resource_records": [
+ {
+ "content": ["ru-1.unit.tests."],
+ "meta": {"countries": ["RU"]},
+ },
+ {
+ "content": ["ru-2.unit.tests."],
+ "meta": {"countries": ["RU"]},
+ },
+ {
+ "content": ["eu.unit.tests."],
+ "meta": {"continents": ["EU"]},
+ },
+ {
+ "content": ["en.unit.tests."],
+ "meta": {"default": True},
+ },
+ ],
+ },
+ ),
+ call(
+ "POST",
+ "http://api/zones/unit.tests/cname-dflt.unit.tests./CNAME",
+ data={
+ "ttl": 300,
+ "filters": self.default_filters,
+ "resource_records": [
+ {
+ "content": ["eu.unit.tests."],
+ "meta": {"continents": ["EU"]},
+ },
+ {
+ "content": ["en.unit.tests."],
+ },
+ ],
+ },
+ ),
+ ]
+ )
diff --git a/tests/test_octodns_provider_hetzner.py b/tests/test_octodns_provider_hetzner.py
new file mode 100644
index 0000000..218a6b2
--- /dev/null
+++ b/tests/test_octodns_provider_hetzner.py
@@ -0,0 +1,341 @@
+#
+#
+#
+
+
+from __future__ import absolute_import, division, print_function, \
+ unicode_literals
+
+from mock import Mock, call
+from os.path import dirname, join
+from requests import HTTPError
+from requests_mock import ANY, mock as requests_mock
+from six import text_type
+from unittest import TestCase
+
+from octodns.record import Record
+from octodns.provider.hetzner import HetznerClientNotFound, \
+ HetznerProvider
+from octodns.provider.yaml import YamlProvider
+from octodns.zone import Zone
+
+
+class TestHetznerProvider(TestCase):
+ expected = Zone('unit.tests.', [])
+ source = YamlProvider('test', join(dirname(__file__), 'config'))
+ source.populate(expected)
+
+ def test_populate(self):
+ provider = HetznerProvider('test', 'token')
+
+ # Bad auth
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=401,
+ text='{"message":"Invalid authentication credentials"}')
+
+ with self.assertRaises(Exception) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals('Unauthorized', text_type(ctx.exception))
+
+ # General error
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=502, text='Things caught fire')
+
+ with self.assertRaises(HTTPError) as ctx:
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(502, ctx.exception.response.status_code)
+
+ # Non-existent zone doesn't populate anything
+ with requests_mock() as mock:
+ mock.get(ANY, status_code=404,
+ text='{"zone":{"id":"","name":"","ttl":0,"registrar":"",'
+ '"legacy_dns_host":"","legacy_ns":null,"ns":null,'
+ '"created":"","verified":"","modified":"","project":"",'
+ '"owner":"","permission":"","zone_type":{"id":"",'
+ '"name":"","description":"","prices":null},"status":"",'
+ '"paused":false,"is_secondary_dns":false,'
+ '"txt_verification":{"name":"","token":""},'
+ '"records_count":0},"error":{'
+ '"message":"zone not found","code":404}}')
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(set(), zone.records)
+
+ # No diffs == no changes
+ with requests_mock() as mock:
+ base = provider._client.BASE_URL
+ with open('tests/fixtures/hetzner-zones.json') as fh:
+ mock.get('{}/zones'.format(base), text=fh.read())
+ with open('tests/fixtures/hetzner-records.json') as fh:
+ mock.get('{}/records'.format(base), text=fh.read())
+
+ zone = Zone('unit.tests.', [])
+ provider.populate(zone)
+ self.assertEquals(13, len(zone.records))
+ changes = self.expected.changes(zone, provider)
+ self.assertEquals(0, len(changes))
+
+ # 2nd populate makes no network calls/all from cache
+ again = Zone('unit.tests.', [])
+ provider.populate(again)
+ self.assertEquals(13, len(again.records))
+
+ # bust the cache
+ del provider._zone_records[zone.name]
+
+ def test_apply(self):
+ provider = HetznerProvider('test', 'token')
+
+ resp = Mock()
+ resp.json = Mock()
+ provider._client._do = Mock(return_value=resp)
+
+ domain_after_creation = {'zone': {
+ 'id': 'unit.tests',
+ 'name': 'unit.tests',
+ 'ttl': 3600,
+ }}
+
+ # non-existent domain, create everything
+ resp.json.side_effect = [
+ HetznerClientNotFound, # no zone in populate
+ HetznerClientNotFound, # no zone during apply
+ domain_after_creation,
+ ]
+ plan = provider.plan(self.expected)
+
+ # No root NS, no ignored, no excluded, no unsupported
+ n = len(self.expected.records) - 10
+ self.assertEquals(n, len(plan.changes))
+ self.assertEquals(n, provider.apply(plan))
+ self.assertFalse(plan.exists)
+
+ provider._client._do.assert_has_calls([
+ # created the zone
+ call('POST', '/zones', None, {
+ 'name': 'unit.tests',
+ 'ttl': None,
+ }),
+ # created all the records with their expected data
+ call('POST', '/records', data={
+ 'name': '@',
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '@',
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.5',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '@',
+ 'ttl': 3600,
+ 'type': 'CAA',
+ 'value': '0 issue "ca.unit.tests"',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '_imap._tcp',
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'value': '0 0 0 .',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '_pop3._tcp',
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'value': '0 0 0 .',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '_srv._tcp',
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'value': '10 20 30 foo-1.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': '_srv._tcp',
+ 'ttl': 600,
+ 'type': 'SRV',
+ 'value': '12 20 30 foo-2.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'aaaa',
+ 'ttl': 600,
+ 'type': 'AAAA',
+ 'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'cname',
+ 'ttl': 300,
+ 'type': 'CNAME',
+ 'value': 'unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'included',
+ 'ttl': 3600,
+ 'type': 'CNAME',
+ 'value': 'unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'mx',
+ 'ttl': 300,
+ 'type': 'MX',
+ 'value': '10 smtp-4.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'mx',
+ 'ttl': 300,
+ 'type': 'MX',
+ 'value': '20 smtp-2.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'mx',
+ 'ttl': 300,
+ 'type': 'MX',
+ 'value': '30 smtp-3.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'mx',
+ 'ttl': 300,
+ 'type': 'MX',
+ 'value': '40 smtp-1.unit.tests.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'sub',
+ 'ttl': 3600,
+ 'type': 'NS',
+ 'value': '6.2.3.4.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'sub',
+ 'ttl': 3600,
+ 'type': 'NS',
+ 'value': '7.2.3.4.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'txt',
+ 'ttl': 600,
+ 'type': 'TXT',
+ 'value': 'Bah bah black sheep',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'txt',
+ 'ttl': 600,
+ 'type': 'TXT',
+ 'value': 'have you any wool.',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'txt',
+ 'ttl': 600,
+ 'type': 'TXT',
+ 'value': 'v=DKIM1;k=rsa;s=email;h=sha256;'
+ 'p=A/kinda+of/long/string+with+numb3rs',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'www',
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '2.2.3.6',
+ 'zone_id': 'unit.tests',
+ }),
+ call('POST', '/records', data={
+ 'name': 'www.sub',
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '2.2.3.6',
+ 'zone_id': 'unit.tests',
+ }),
+ ])
+ self.assertEquals(24, provider._client._do.call_count)
+
+ provider._client._do.reset_mock()
+
+ # delete 1 and update 1
+ provider._client.zone_get = Mock(return_value={
+ 'id': 'unit.tests',
+ 'name': 'unit.tests',
+ 'ttl': 3600,
+ })
+ provider._client.zone_records_get = Mock(return_value=[
+ {
+ 'type': 'A',
+ 'id': 'one',
+ 'created': '0000-00-00T00:00:00Z',
+ 'modified': '0000-00-00T00:00:00Z',
+ 'zone_id': 'unit.tests',
+ 'name': 'www',
+ 'value': '1.2.3.4',
+ 'ttl': 300,
+ },
+ {
+ 'type': 'A',
+ 'id': 'two',
+ 'created': '0000-00-00T00:00:00Z',
+ 'modified': '0000-00-00T00:00:00Z',
+ 'zone_id': 'unit.tests',
+ 'name': 'www',
+ 'value': '2.2.3.4',
+ 'ttl': 300,
+ },
+ {
+ 'type': 'A',
+ 'id': 'three',
+ 'created': '0000-00-00T00:00:00Z',
+ 'modified': '0000-00-00T00:00:00Z',
+ 'zone_id': 'unit.tests',
+ 'name': 'ttl',
+ 'value': '3.2.3.4',
+ 'ttl': 600,
+ },
+ ])
+
+ # Domain exists, we don't care about return
+ resp.json.side_effect = ['{}']
+
+ wanted = Zone('unit.tests.', [])
+ wanted.add_record(Record.new(wanted, 'ttl', {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '3.2.3.4',
+ }))
+
+ plan = provider.plan(wanted)
+ self.assertTrue(plan.exists)
+ self.assertEquals(2, len(plan.changes))
+ self.assertEquals(2, provider.apply(plan))
+ # recreate for update, and delete for the 2 parts of the other
+ provider._client._do.assert_has_calls([
+ call('POST', '/records', data={
+ 'name': 'ttl',
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '3.2.3.4',
+ 'zone_id': 'unit.tests',
+ }),
+ call('DELETE', '/records/one'),
+ call('DELETE', '/records/two'),
+ call('DELETE', '/records/three'),
+ ], any_order=True)
diff --git a/tests/test_octodns_provider_mythicbeasts.py b/tests/test_octodns_provider_mythicbeasts.py
index 960bd65..26af8c1 100644
--- a/tests/test_octodns_provider_mythicbeasts.py
+++ b/tests/test_octodns_provider_mythicbeasts.py
@@ -171,7 +171,7 @@ class TestMythicBeastsProvider(TestCase):
def test_command_generation(self):
zone = Zone('unit.tests.', [])
- zone.add_record(Record.new(zone, 'prawf-alias', {
+ zone.add_record(Record.new(zone, '', {
'ttl': 60,
'type': 'ALIAS',
'value': 'alias.unit.tests.',
@@ -228,7 +228,7 @@ class TestMythicBeastsProvider(TestCase):
)
expected_commands = [
- 'ADD prawf-alias.unit.tests 60 ANAME alias.unit.tests.',
+ 'ADD unit.tests 60 ANAME alias.unit.tests.',
'ADD prawf-ns.unit.tests 300 NS alias.unit.tests.',
'ADD prawf-ns.unit.tests 300 NS alias2.unit.tests.',
'ADD prawf-a.unit.tests 60 A 1.2.3.4',
@@ -378,8 +378,8 @@ class TestMythicBeastsProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(15, len(zone.records))
- self.assertEquals(15, len(self.expected.records))
+ self.assertEquals(17, len(zone.records))
+ self.assertEquals(17, len(self.expected.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
@@ -445,7 +445,7 @@ class TestMythicBeastsProvider(TestCase):
if isinstance(c, Update)]))
self.assertEquals(1, len([c for c in plan.changes
if isinstance(c, Delete)]))
- self.assertEquals(14, len([c for c in plan.changes
+ self.assertEquals(16, len([c for c in plan.changes
if isinstance(c, Create)]))
- self.assertEquals(16, provider.apply(plan))
+ self.assertEquals(18, provider.apply(plan))
self.assertTrue(plan.exists)
diff --git a/tests/test_octodns_provider_ns1.py b/tests/test_octodns_provider_ns1.py
index 00b068b..de6bdc9 100644
--- a/tests/test_octodns_provider_ns1.py
+++ b/tests/test_octodns_provider_ns1.py
@@ -109,6 +109,22 @@ class TestNs1Provider(TestCase):
'value': 'ca.unit.tests',
},
}))
+ expected.add(Record.new(zone, 'urlfwd', {
+ 'ttl': 41,
+ 'type': 'URLFWD',
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo.unit.tests',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ },
+ }))
+ expected.add(Record.new(zone, '1.2.3.4', {
+ 'ttl': 42,
+ 'type': 'PTR',
+ 'values': ['one.one.one.one.', 'two.two.two.two.'],
+ }))
ns1_records = [{
'type': 'A',
@@ -164,6 +180,16 @@ class TestNs1Provider(TestCase):
'ttl': 40,
'short_answers': ['0 issue ca.unit.tests'],
'domain': 'unit.tests.',
+ }, {
+ 'type': 'URLFWD',
+ 'ttl': 41,
+ 'short_answers': ['/ http://foo.unit.tests 301 2 0'],
+ 'domain': 'urlfwd.unit.tests.',
+ }, {
+ 'type': 'PTR',
+ 'ttl': 42,
+ 'short_answers': ['one.one.one.one.', 'two.two.two.two.'],
+ 'domain': '1.2.3.4.unit.tests.',
}]
@patch('ns1.rest.records.Records.retrieve')
@@ -171,7 +197,13 @@ class TestNs1Provider(TestCase):
def test_populate(self, zone_retrieve_mock, record_retrieve_mock):
provider = Ns1Provider('test', 'api-key')
+ def reset():
+ provider._client.reset_caches()
+ zone_retrieve_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+
# Bad auth
+ reset()
zone_retrieve_mock.side_effect = AuthException('unauthorized')
zone = Zone('unit.tests.', [])
with self.assertRaises(AuthException) as ctx:
@@ -179,7 +211,7 @@ class TestNs1Provider(TestCase):
self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception)
# General error
- zone_retrieve_mock.reset_mock()
+ reset()
zone_retrieve_mock.side_effect = ResourceException('boom')
zone = Zone('unit.tests.', [])
with self.assertRaises(ResourceException) as ctx:
@@ -188,7 +220,7 @@ class TestNs1Provider(TestCase):
self.assertEquals(('unit.tests',), zone_retrieve_mock.call_args[0])
# Non-existent zone doesn't populate anything
- zone_retrieve_mock.reset_mock()
+ reset()
zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
zone = Zone('unit.tests.', [])
@@ -198,8 +230,7 @@ class TestNs1Provider(TestCase):
self.assertFalse(exists)
# Existing zone w/o records
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': [{
"domain": "geo.unit.tests",
@@ -229,8 +260,7 @@ class TestNs1Provider(TestCase):
'geo.unit.tests', 'A')])
# Existing zone w/records
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': self.ns1_records + [{
"domain": "geo.unit.tests",
@@ -260,8 +290,7 @@ class TestNs1Provider(TestCase):
'geo.unit.tests', 'A')])
# Test skipping unsupported record type
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': self.ns1_records + [{
'type': 'UNSUPPORTED',
@@ -314,19 +343,21 @@ class TestNs1Provider(TestCase):
self.assertEquals(expected_n, len(plan.changes))
self.assertTrue(plan.exists)
+ def reset():
+ provider._client.reset_caches()
+ record_retrieve_mock.reset_mock()
+ zone_create_mock.reset_mock()
+ zone_retrieve_mock.reset_mock()
+
# Fails, general error
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
- zone_create_mock.reset_mock()
+ reset()
zone_retrieve_mock.side_effect = ResourceException('boom')
with self.assertRaises(ResourceException) as ctx:
provider.apply(plan)
self.assertEquals(zone_retrieve_mock.side_effect, ctx.exception)
# Fails, bad auth
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
- zone_create_mock.reset_mock()
+ reset()
zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
zone_create_mock.side_effect = AuthException('unauthorized')
@@ -335,17 +366,15 @@ class TestNs1Provider(TestCase):
self.assertEquals(zone_create_mock.side_effect, ctx.exception)
# non-existent zone, create
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
- zone_create_mock.reset_mock()
+ reset()
zone_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
zone_create_mock.side_effect = ['foo']
- # Test out the create rate-limit handling, then 9 successes
+ # Test out the create rate-limit handling, then successes for the rest
record_create_mock.side_effect = [
RateLimitException('boo', period=0),
- ] + ([None] * 9)
+ ] + ([None] * len(self.expected))
got_n = provider.apply(plan)
self.assertEquals(expected_n, got_n)
@@ -363,12 +392,13 @@ class TestNs1Provider(TestCase):
call('unit.tests', 'unit.tests', 'MX', answers=[
(10, 'mx1.unit.tests.'), (20, 'mx2.unit.tests.')
], ttl=35),
+ call('unit.tests', '1.2.3.4.unit.tests', 'PTR', answers=[
+ 'one.one.one.one.', 'two.two.two.two.',
+ ], ttl=42),
])
# Update & delete
- zone_retrieve_mock.reset_mock()
- record_retrieve_mock.reset_mock()
- zone_create_mock.reset_mock()
+ reset()
ns1_zone = {
'records': self.ns1_records + [{
@@ -578,6 +608,82 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {},
})
+ def aaaa_record(self):
+ return Record.new(self.zone, '', {
+ 'dynamic': {
+ 'pools': {
+ 'lhr': {
+ 'fallback': 'iad',
+ 'values': [{
+ 'value': '::ffff:3.4.5.6',
+ }],
+ },
+ 'iad': {
+ 'values': [{
+ 'value': '::ffff:1.2.3.4',
+ }, {
+ 'value': '::ffff:2.3.4.5',
+ }],
+ },
+ },
+ 'rules': [{
+ 'geos': [
+ 'AF',
+ 'EU-GB',
+ 'NA-US-FL'
+ ],
+ 'pool': 'lhr',
+ }, {
+ 'geos': [
+ 'AF-ZW',
+ ],
+ 'pool': 'iad',
+ }, {
+ 'pool': 'iad',
+ }],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'host': 'send.me',
+ 'path': '/_ping',
+ 'port': 80,
+ 'protocol': 'HTTP',
+ }
+ },
+ 'ttl': 32,
+ 'type': 'AAAA',
+ 'value': '::ffff:1.2.3.4',
+ 'meta': {},
+ })
+
+ def cname_record(self):
+ return Record.new(self.zone, 'foo', {
+ 'dynamic': {
+ 'pools': {
+ 'iad': {
+ 'values': [{
+ 'value': 'iad.unit.tests.',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'iad',
+ }],
+ },
+ 'octodns': {
+ 'healthcheck': {
+ 'host': 'send.me',
+ 'path': '/_ping',
+ 'port': 80,
+ 'protocol': 'HTTP',
+ }
+ },
+ 'ttl': 33,
+ 'type': 'CNAME',
+ 'value': 'value.unit.tests.',
+ 'meta': {},
+ })
+
def test_notes(self):
provider = Ns1Provider('test', 'api-key')
@@ -609,6 +715,12 @@ class TestNs1ProviderDynamic(TestCase):
},
'notes': 'host:unit.tests type:A',
}
+ monitor_five = {
+ 'config': {
+ 'host': 'iad.unit.tests',
+ },
+ 'notes': 'host:foo.unit.tests type:CNAME',
+ }
provider._client._monitors_cache = {
'one': monitor_one,
'two': {
@@ -624,6 +736,19 @@ class TestNs1ProviderDynamic(TestCase):
'notes': 'host:other.unit.tests type:A',
},
'four': monitor_four,
+ 'five': monitor_five,
+ 'six': {
+ 'config': {
+ 'host': '10.10.10.10',
+ },
+ 'notes': 'non-conforming notes',
+ },
+ 'seven': {
+ 'config': {
+ 'host': '11.11.11.11',
+ },
+ 'notes': None,
+ },
}
# Would match, but won't get there b/c it's not dynamic
@@ -641,6 +766,11 @@ class TestNs1ProviderDynamic(TestCase):
'2.3.4.5': monitor_four,
}, provider._monitors_for(self.record()))
+ # Check match for CNAME values
+ self.assertEquals({
+ 'iad.unit.tests.': monitor_five,
+ }, provider._monitors_for(self.cname_record()))
+
def test_uuid(self):
# Just a smoke test/for coverage
provider = Ns1Provider('test', 'api-key')
@@ -700,12 +830,70 @@ class TestNs1ProviderDynamic(TestCase):
monitor = {
'name': 'test monitor',
}
+ provider._client._notifylists_cache = {}
monitor_id, feed_id = provider._monitor_create(monitor)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
monitors_create_mock.assert_has_calls([call(name='test monitor',
notify_list='nl-id')])
+ @patch('octodns.provider.ns1.Ns1Provider._feed_create')
+ @patch('octodns.provider.ns1.Ns1Client.monitors_create')
+ @patch('octodns.provider.ns1.Ns1Client._try')
+ def test_monitor_create_shared_notifylist(self, try_mock,
+ monitors_create_mock,
+ feed_create_mock):
+ provider = Ns1Provider('test', 'api-key', shared_notifylist=True)
+
+ # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {}
+
+ # First time we'll need to create the share list
+ provider._client._notifylists_cache = {}
+ try_mock.reset_mock()
+ monitors_create_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ try_mock.side_effect = [{
+ 'id': 'nl-id',
+ 'name': provider.SHARED_NOTIFYLIST_NAME,
+ }]
+ monitors_create_mock.side_effect = [{
+ 'id': 'mon-id',
+ }]
+ feed_create_mock.side_effect = ['feed-id']
+ monitor = {
+ 'name': 'test monitor',
+ }
+ monitor_id, feed_id = provider._monitor_create(monitor)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitors_create_mock.assert_has_calls([call(name='test monitor',
+ notify_list='nl-id')])
+ try_mock.assert_called_once()
+ # The shared notifylist should be cached now
+ self.assertEquals([provider.SHARED_NOTIFYLIST_NAME],
+ list(provider._client._notifylists_cache.keys()))
+
+ # Second time we'll use the cached version
+ try_mock.reset_mock()
+ monitors_create_mock.reset_mock()
+ feed_create_mock.reset_mock()
+ monitors_create_mock.side_effect = [{
+ 'id': 'mon-id',
+ }]
+ feed_create_mock.side_effect = ['feed-id']
+ monitor = {
+ 'name': 'test monitor',
+ }
+ monitor_id, feed_id = provider._monitor_create(monitor)
+ self.assertEquals('mon-id', monitor_id)
+ self.assertEquals('feed-id', feed_id)
+ monitors_create_mock.assert_has_calls([call(name='test monitor',
+ notify_list='nl-id')])
+ try_mock.assert_not_called()
+
def test_monitor_gen(self):
provider = Ns1Provider('test', 'api-key')
@@ -717,6 +905,10 @@ class TestNs1ProviderDynamic(TestCase):
self.assertFalse(monitor['config']['ssl'])
self.assertEquals('host:unit.tests type:A', monitor['notes'])
+ record._octodns['healthcheck']['host'] = None
+ monitor = provider._monitor_gen(record, value)
+ self.assertTrue(r'\nHost: 3.4.5.6\r' in monitor['config']['send'])
+
record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(record, value)
self.assertTrue(monitor['config']['ssl'])
@@ -728,6 +920,22 @@ class TestNs1ProviderDynamic(TestCase):
# No http response expected
self.assertFalse('rules' in monitor)
+ def test_monitor_gen_AAAA(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ value = '::ffff:3.4.5.6'
+ record = self.aaaa_record()
+ monitor = provider._monitor_gen(record, value)
+ self.assertTrue(monitor['config']['ipv6'])
+
+ def test_monitor_gen_CNAME(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ value = 'iad.unit.tests.'
+ record = self.cname_record()
+ monitor = provider._monitor_gen(record, value)
+ self.assertEquals(value[:-1], monitor['config']['host'])
+
def test_monitor_is_match(self):
provider = Ns1Provider('test', 'api-key')
@@ -786,11 +994,14 @@ class TestNs1ProviderDynamic(TestCase):
'mon-id': 'feed-id',
}
+ def reset():
+ feed_create_mock.reset_mock()
+ monitor_create_mock.reset_mock()
+ monitor_gen_mock.reset_mock()
+ monitors_update_mock.reset_mock()
+
# No existing monitor
- monitor_gen_mock.reset_mock()
- monitor_create_mock.reset_mock()
- monitors_update_mock.reset_mock()
- feed_create_mock.reset_mock()
+ reset()
monitor_gen_mock.side_effect = [{'key': 'value'}]
monitor_create_mock.side_effect = [('mon-id', 'feed-id')]
value = '1.2.3.4'
@@ -804,10 +1015,7 @@ class TestNs1ProviderDynamic(TestCase):
feed_create_mock.assert_not_called()
# Existing monitor that doesn't need updates
- monitor_gen_mock.reset_mock()
- monitor_create_mock.reset_mock()
- monitors_update_mock.reset_mock()
- feed_create_mock.reset_mock()
+ reset()
monitor = {
'id': 'mon-id',
'key': 'value',
@@ -824,10 +1032,7 @@ class TestNs1ProviderDynamic(TestCase):
feed_create_mock.assert_not_called()
# Existing monitor that doesn't need updates, but is missing its feed
- monitor_gen_mock.reset_mock()
- monitor_create_mock.reset_mock()
- monitors_update_mock.reset_mock()
- feed_create_mock.reset_mock()
+ reset()
monitor = {
'id': 'mon-id2',
'key': 'value',
@@ -845,10 +1050,7 @@ class TestNs1ProviderDynamic(TestCase):
feed_create_mock.assert_has_calls([call(monitor)])
# Existing monitor that needs updates
- monitor_gen_mock.reset_mock()
- monitor_create_mock.reset_mock()
- monitors_update_mock.reset_mock()
- feed_create_mock.reset_mock()
+ reset()
monitor = {
'id': 'mon-id',
'key': 'value',
@@ -882,11 +1084,14 @@ class TestNs1ProviderDynamic(TestCase):
'mon-id': 'feed-id',
}
+ def reset():
+ datafeed_delete_mock.reset_mock()
+ monitors_delete_mock.reset_mock()
+ monitors_for_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+
# No active monitors and no existing, nothing will happen
- monitors_for_mock.reset_mock()
- datafeed_delete_mock.reset_mock()
- monitors_delete_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ reset()
monitors_for_mock.side_effect = [{}]
record = self.record()
provider._monitors_gc(record)
@@ -896,16 +1101,19 @@ class TestNs1ProviderDynamic(TestCase):
notifylists_delete_mock.assert_not_called()
# No active monitors and one existing, delete all the things
- monitors_for_mock.reset_mock()
- datafeed_delete_mock.reset_mock()
- monitors_delete_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ reset()
monitors_for_mock.side_effect = [{
'x': {
'id': 'mon-id',
'notify_list': 'nl-id',
}
}]
+ provider._client._notifylists_cache = {
+ 'not shared': {
+ 'id': 'nl-id',
+ 'name': 'not shared',
+ }
+ }
provider._monitors_gc(record)
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')])
@@ -913,10 +1121,7 @@ class TestNs1ProviderDynamic(TestCase):
notifylists_delete_mock.assert_has_calls([call('nl-id')])
# Same existing, this time in active list, should be noop
- monitors_for_mock.reset_mock()
- datafeed_delete_mock.reset_mock()
- monitors_delete_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ reset()
monitors_for_mock.side_effect = [{
'x': {
'id': 'mon-id',
@@ -931,10 +1136,7 @@ class TestNs1ProviderDynamic(TestCase):
# Non-active monitor w/o a feed, and another monitor that's left alone
# b/c it's active
- monitors_for_mock.reset_mock()
- datafeed_delete_mock.reset_mock()
- monitors_delete_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ reset()
monitors_for_mock.side_effect = [{
'x': {
'id': 'mon-id',
@@ -945,12 +1147,69 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id2',
},
}]
+ provider._client._notifylists_cache = {
+ 'not shared': {
+ 'id': 'nl-id',
+ 'name': 'not shared',
+ },
+ 'not shared 2': {
+ 'id': 'nl-id2',
+ 'name': 'not shared 2',
+ }
+ }
provider._monitors_gc(record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_has_calls([call('mon-id2')])
notifylists_delete_mock.assert_has_calls([call('nl-id2')])
+ # Non-active monitor w/o a notifylist, generally shouldn't happen, but
+ # code should handle it just in case someone gets clicky in the UI
+ reset()
+ monitors_for_mock.side_effect = [{
+ 'y': {
+ 'id': 'mon-id2',
+ 'notify_list': 'nl-id2',
+ },
+ }]
+ provider._client._notifylists_cache = {
+ 'not shared a': {
+ 'id': 'nl-ida',
+ 'name': 'not shared a',
+ },
+ 'not shared b': {
+ 'id': 'nl-idb',
+ 'name': 'not shared b',
+ }
+ }
+ provider._monitors_gc(record, {'mon-id'})
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_not_called()
+ monitors_delete_mock.assert_has_calls([call('mon-id2')])
+ notifylists_delete_mock.assert_not_called()
+
+ # Non-active monitor with a shared notifylist, monitor deleted, but
+ # notifylist is left alone
+ reset()
+ provider.shared_notifylist = True
+ monitors_for_mock.side_effect = [{
+ 'y': {
+ 'id': 'mon-id2',
+ 'notify_list': 'shared',
+ },
+ }]
+ provider._client._notifylists_cache = {
+ 'shared': {
+ 'id': 'shared',
+ 'name': provider.SHARED_NOTIFYLIST_NAME,
+ },
+ }
+ provider._monitors_gc(record, {'mon-id'})
+ monitors_for_mock.assert_has_calls([call(record)])
+ datafeed_delete_mock.assert_not_called()
+ monitors_delete_mock.assert_has_calls([call('mon-id2')])
+ notifylists_delete_mock.assert_not_called()
+
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_region_only(self, monitors_for_mock,
@@ -982,7 +1241,7 @@ class TestNs1ProviderDynamic(TestCase):
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
- rule1['geos'] = ['NA']
+ rule1['geos'] = ['AS']
ret, monitor_ids = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
self.assertEquals(ret['filters'],
@@ -996,7 +1255,7 @@ class TestNs1ProviderDynamic(TestCase):
},
'iad__georegion': {
'meta': {
- 'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
+ 'georegion': ['ASIAPAC'],
'note': 'rule-order:1'
}
},
@@ -1040,7 +1299,7 @@ class TestNs1ProviderDynamic(TestCase):
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
- rule1['geos'] = ['NA-US-CA']
+ rule1['geos'] = ['NA-US-CA', 'NA-CA-NL']
ret, _ = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
@@ -1055,7 +1314,8 @@ class TestNs1ProviderDynamic(TestCase):
'iad__country': {
'meta': {
'note': 'rule-order:1',
- 'us_state': ['CA']
+ 'us_state': ['CA'],
+ 'ca_province': ['NL']
}
},
'lhr__georegion': {
@@ -1083,6 +1343,7 @@ class TestNs1ProviderDynamic(TestCase):
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
+ provider._client.reset_caches()
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
@@ -1098,7 +1359,7 @@ class TestNs1ProviderDynamic(TestCase):
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU', 'NA-US-CA']
- rule1['geos'] = ['NA', 'NA-US']
+ rule1['geos'] = ['AS', 'AS-IN']
ret, _ = provider._params_for_A(record)
self.assertEquals(17, len(ret['answers']))
@@ -1117,14 +1378,21 @@ class TestNs1ProviderDynamic(TestCase):
# finally has a catchall. Those are examples of the two ways pools get
# expanded.
#
- # lhr splits in two, with a region and country.
+ # lhr splits in two, with a region and country and includes a fallback
+ #
+ # All values now include their own `pool:` name
#
# well as both lhr georegion (for contients) and country. The first is
# an example of a repeated target pool in a rule (only allowed when the
# 2nd is a catchall.)
- self.assertEquals(['from:--default--', 'from:iad__catchall',
- 'from:iad__country', 'from:iad__georegion',
- 'from:lhr__country', 'from:lhr__georegion'],
+ self.assertEquals(['fallback: from:iad__catchall pool:iad',
+ 'fallback: from:iad__country pool:iad',
+ 'fallback: from:iad__georegion pool:iad',
+ 'fallback: from:lhr__country pool:iad',
+ 'fallback: from:lhr__georegion pool:iad',
+ 'fallback:iad from:lhr__country pool:lhr',
+ 'fallback:iad from:lhr__georegion pool:lhr',
+ 'from:--default--'],
sorted(notes.keys()))
# All the iad's should match (after meta and region were removed)
@@ -1151,13 +1419,13 @@ class TestNs1ProviderDynamic(TestCase):
},
'iad__country': {
'meta': {
- 'country': ['US'],
+ 'country': ['IN'],
'note': 'rule-order:1'
}
},
'iad__georegion': {
'meta': {
- 'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
+ 'georegion': ['ASIAPAC'],
'note': 'rule-order:1'
}
},
@@ -1242,9 +1510,13 @@ class TestNs1ProviderDynamic(TestCase):
('mid-2', 'fid-2'),
('mid-3', 'fid-3'),
]
- # This indirectly calls into _params_for_dynamic_A and tests the
+ # This indirectly calls into _params_for_dynamic and tests the
# handling to get there
record = self.record()
+ # copy an existing answer from a different pool to 'lhr'
+ # in order to test answer repetition across pools (monitor reuse)
+ record.dynamic._data()['pools']['lhr']['values'].append(
+ record.dynamic._data()['pools']['iad']['values'][0])
ret, _ = provider._params_for_A(record)
# Given that record has both country and region in the rules,
@@ -1270,7 +1542,39 @@ class TestNs1ProviderDynamic(TestCase):
params, _ = provider._params_for_geo_A(record)
self.assertEquals([], params['filters'])
- def test_data_for_dynamic_A(self):
+ @patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
+ @patch('octodns.provider.ns1.Ns1Provider._monitors_for')
+ def test_params_for_dynamic_CNAME(self, monitors_for_mock,
+ monitor_sync_mock):
+ provider = Ns1Provider('test', 'api-key')
+
+ # pre-fill caches to avoid extraneous calls (things we're testing
+ # elsewhere)
+ provider._client._datasource_id = 'foo'
+ provider._client._feeds_for_monitors = {
+ 'mon-id': 'feed-id',
+ }
+
+ # provider._params_for_A() calls provider._monitors_for() and
+ # provider._monitor_sync(). Mock their return values so that we don't
+ # make NS1 API calls during tests
+ monitors_for_mock.reset_mock()
+ monitor_sync_mock.reset_mock()
+ monitors_for_mock.side_effect = [{
+ 'iad.unit.tests.': 'mid-1',
+ }]
+ monitor_sync_mock.side_effect = [
+ ('mid-1', 'fid-1'),
+ ]
+
+ record = self.cname_record()
+ ret, _ = provider._params_for_CNAME(record)
+
+ # Check if the default value was correctly read and populated
+ # All other dynamic record test cases are covered by dynamic_A tests
+ self.assertEquals(ret['answers'][-1]['answer'][0], 'value.unit.tests.')
+
+ def test_data_for_dynamic(self):
provider = Ns1Provider('test', 'api-key')
# Unexpected filters throws an error
@@ -1279,7 +1583,7 @@ class TestNs1ProviderDynamic(TestCase):
'filters': [],
}
with self.assertRaises(Ns1Exception) as ctx:
- provider._data_for_dynamic_A('A', ns1_record)
+ provider._data_for_dynamic('A', ns1_record)
self.assertEquals('Unrecognized advanced record',
text_type(ctx.exception))
@@ -1291,7 +1595,7 @@ class TestNs1ProviderDynamic(TestCase):
'regions': {},
'ttl': 42,
}
- data = provider._data_for_dynamic_A('A', ns1_record)
+ data = provider._data_for_dynamic('A', ns1_record)
self.assertEquals({
'dynamic': {
'pools': {},
@@ -1374,8 +1678,9 @@ class TestNs1ProviderDynamic(TestCase):
'lhr__country': {
'meta': {
'note': 'rule-order:1 fallback:iad',
- 'country': ['CA'],
+ 'country': ['MX'],
'us_state': ['OR'],
+ 'ca_province': ['NL']
},
},
# iad will use the old style "plain" region naming. We won't
@@ -1396,7 +1701,7 @@ class TestNs1ProviderDynamic(TestCase):
'tier': 3,
'ttl': 42,
}
- data = provider._data_for_dynamic_A('A', ns1_record)
+ data = provider._data_for_dynamic('A', ns1_record)
self.assertEquals({
'dynamic': {
'pools': {
@@ -1419,8 +1724,9 @@ class TestNs1ProviderDynamic(TestCase):
'_order': '1',
'geos': [
'AF',
- 'NA-CA',
- 'NA-US-OR',
+ 'NA-CA-NL',
+ 'NA-MX',
+ 'NA-US-OR'
],
'pool': 'lhr',
}, {
@@ -1440,7 +1746,7 @@ class TestNs1ProviderDynamic(TestCase):
}, data)
# Same answer if we go through _data_for_A which out sources the job to
- # _data_for_dynamic_A
+ # _data_for_dynamic
data2 = provider._data_for_A('A', ns1_record)
self.assertEquals(data, data2)
@@ -1451,7 +1757,7 @@ class TestNs1ProviderDynamic(TestCase):
ns1_record['regions'][old_style_catchall_pool_name] = \
ns1_record['regions'][catchall_pool_name]
del ns1_record['regions'][catchall_pool_name]
- data3 = provider._data_for_dynamic_A('A', ns1_record)
+ data3 = provider._data_for_dynamic('A', ns1_record)
self.assertEquals(data, data2)
# Oceania test cases
@@ -1471,6 +1777,194 @@ class TestNs1ProviderDynamic(TestCase):
self.assertTrue(
'OC-{}'.format(c) in data4['dynamic']['rules'][0]['geos'])
+ # NA test cases
+ # 1. Full list of countries should return 'NA' in geos
+ na_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['NA']
+ del ns1_record['regions']['lhr__country']['meta']['us_state']
+ ns1_record['regions']['lhr__country']['meta']['country'] = \
+ list(na_countries)
+ data5 = provider._data_for_A('A', ns1_record)
+ self.assertTrue('NA' in data5['dynamic']['rules'][0]['geos'])
+
+ # 2. Partial list of countries should return just those
+ partial_na_cntry_list = list(na_countries)[:5] + ['SX', 'UM']
+ ns1_record['regions']['lhr__country']['meta']['country'] = \
+ partial_na_cntry_list
+ data6 = provider._data_for_A('A', ns1_record)
+ for c in partial_na_cntry_list:
+ self.assertTrue(
+ 'NA-{}'.format(c) in data6['dynamic']['rules'][0]['geos'])
+
+ # Test out fallback only pools and new-style notes
+ ns1_record = {
+ 'answers': [{
+ 'answer': ['1.1.1.1'],
+ 'meta': {
+ 'priority': 1,
+ 'note': 'from:one__country pool:one fallback:two',
+ },
+ 'region': 'one_country',
+ }, {
+ 'answer': ['2.2.2.2'],
+ 'meta': {
+ 'priority': 2,
+ 'note': 'from:one__country pool:two fallback:three',
+ },
+ 'region': 'one_country',
+ }, {
+ 'answer': ['3.3.3.3'],
+ 'meta': {
+ 'priority': 3,
+ 'note': 'from:one__country pool:three fallback:',
+ },
+ 'region': 'one_country',
+ }, {
+ 'answer': ['5.5.5.5'],
+ 'meta': {
+ 'priority': 4,
+ 'note': 'from:--default--',
+ },
+ 'region': 'one_country',
+ }, {
+ 'answer': ['4.4.4.4'],
+ 'meta': {
+ 'priority': 1,
+ 'note': 'from:four__country pool:four fallback:',
+ },
+ 'region': 'four_country',
+ }, {
+ 'answer': ['5.5.5.5'],
+ 'meta': {
+ 'priority': 2,
+ 'note': 'from:--default--',
+ },
+ 'region': 'four_country',
+ }],
+ 'domain': 'unit.tests',
+ 'filters': filters,
+ 'regions': {
+ 'one__country': {
+ 'meta': {
+ 'note': 'rule-order:1 fallback:two',
+ 'country': ['CA'],
+ 'us_state': ['OR'],
+ },
+ },
+ 'four__country': {
+ 'meta': {
+ 'note': 'rule-order:2',
+ 'country': ['CA'],
+ 'us_state': ['OR'],
+ },
+ },
+ catchall_pool_name: {
+ 'meta': {
+ 'note': 'rule-order:3',
+ },
+ }
+ },
+ 'tier': 3,
+ 'ttl': 42,
+ }
+ data = provider._data_for_dynamic('A', ns1_record)
+ self.assertEquals({
+ 'dynamic': {
+ 'pools': {
+ 'four': {
+ 'fallback': None,
+ 'values': [{'value': '4.4.4.4', 'weight': 1}]
+ },
+ 'one': {
+ 'fallback': 'two',
+ 'values': [{'value': '1.1.1.1', 'weight': 1}]
+ },
+ 'three': {
+ 'fallback': None,
+ 'values': [{'value': '3.3.3.3', 'weight': 1}]
+ },
+ 'two': {
+ 'fallback': 'three',
+ 'values': [{'value': '2.2.2.2', 'weight': 1}]
+ },
+ },
+ 'rules': [{
+ '_order': '1',
+ 'geos': ['NA-CA', 'NA-US-OR'],
+ 'pool': 'one'
+ }, {
+ '_order': '2',
+ 'geos': ['NA-CA', 'NA-US-OR'],
+ 'pool': 'four'
+ }, {
+ '_order': '3', 'pool': 'iad'}
+ ]
+ },
+ 'ttl': 42,
+ 'type': 'A',
+ 'values': ['5.5.5.5']
+ }, data)
+
+ def test_data_for_dynamic_CNAME(self):
+ provider = Ns1Provider('test', 'api-key')
+
+ # Test out a small setup that just covers default value validation
+ # Everything else is same as dynamic A whose tests will cover all
+ # other options and test cases
+ # Not testing for geo/region specific cases
+ filters = provider._get_updated_filter_chain(False, False)
+ catchall_pool_name = 'iad__catchall'
+ ns1_record = {
+ 'answers': [{
+ 'answer': ['iad.unit.tests.'],
+ 'meta': {
+ 'priority': 1,
+ 'weight': 12,
+ 'note': 'from:{}'.format(catchall_pool_name),
+ },
+ 'region': catchall_pool_name,
+ }, {
+ 'answer': ['value.unit.tests.'],
+ 'meta': {
+ 'priority': 2,
+ 'note': 'from:--default--',
+ },
+ 'region': catchall_pool_name,
+ }],
+ 'domain': 'foo.unit.tests',
+ 'filters': filters,
+ 'regions': {
+ catchall_pool_name: {
+ 'meta': {
+ 'note': 'rule-order:1',
+ },
+ }
+ },
+ 'tier': 3,
+ 'ttl': 43,
+ 'type': 'CNAME',
+ }
+ data = provider._data_for_CNAME('CNAME', ns1_record)
+ self.assertEquals({
+ 'dynamic': {
+ 'pools': {
+ 'iad': {
+ 'fallback': None,
+ 'values': [{
+ 'value': 'iad.unit.tests.',
+ 'weight': 12,
+ }],
+ },
+ },
+ 'rules': [{
+ '_order': '1',
+ 'pool': 'iad',
+ }],
+ },
+ 'ttl': 43,
+ 'type': 'CNAME',
+ 'value': 'value.unit.tests.',
+ }, data)
+
@patch('ns1.rest.records.Records.retrieve')
@patch('ns1.rest.zones.Zones.retrieve')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
@@ -1480,34 +1974,35 @@ class TestNs1ProviderDynamic(TestCase):
desired = Zone('unit.tests.', [])
+ def reset():
+ monitors_for_mock.reset_mock()
+ provider._client.reset_caches()
+ records_retrieve_mock.reset_mock()
+ zones_retrieve_mock.reset_mock()
+
# Empty zone and no changes
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
extra = provider._extra_changes(desired, [])
self.assertFalse(extra)
monitors_for_mock.assert_not_called()
# Non-existent zone. No changes
- monitors_for_mock.reset_mock()
+ reset()
zones_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
- records_retrieve_mock.reset_mock()
extra = provider._extra_changes(desired, [])
self.assertFalse(extra)
# Unexpected exception message
- zones_retrieve_mock.reset_mock()
+ reset()
zones_retrieve_mock.side_effect = ResourceException('boom')
with self.assertRaises(ResourceException) as ctx:
extra = provider._extra_changes(desired, [])
self.assertEquals(zones_retrieve_mock.side_effect, ctx.exception)
# Simple record, ignored, filter update lookups ignored
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
zones_retrieve_mock.side_effect = \
ResourceException('server error: zone not found')
@@ -1552,9 +2047,7 @@ class TestNs1ProviderDynamic(TestCase):
desired.add_record(dynamic)
# untouched, but everything in sync so no change needed
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
# Generate what we expect to have
gend = provider._monitor_gen(dynamic, '1.2.3.4')
gend.update({
@@ -1572,9 +2065,7 @@ class TestNs1ProviderDynamic(TestCase):
# If we don't have a notify list we're broken and we'll expect to see
# an Update
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
del gend['notify_list']
monitors_for_mock.side_effect = [{
'1.2.3.4': gend,
@@ -1588,9 +2079,7 @@ class TestNs1ProviderDynamic(TestCase):
# Add notify_list back and change the healthcheck protocol, we'll still
# expect to see an update
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
gend['notify_list'] = 'xyz'
dynamic._octodns['healthcheck']['protocol'] = 'HTTPS'
del gend['notify_list']
@@ -1605,9 +2094,7 @@ class TestNs1ProviderDynamic(TestCase):
monitors_for_mock.assert_has_calls([call(dynamic)])
# If it's in the changed list, it'll be ignored
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
extra = provider._extra_changes(desired, [update])
self.assertFalse(extra)
monitors_for_mock.assert_not_called()
@@ -1615,9 +2102,7 @@ class TestNs1ProviderDynamic(TestCase):
# Test changes in filters
# No change in filters
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': [{
"domain": "dyn.unit.tests",
@@ -1634,9 +2119,7 @@ class TestNs1ProviderDynamic(TestCase):
self.assertFalse(extra)
# filters need an update
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': [{
"domain": "dyn.unit.tests",
@@ -1653,9 +2136,7 @@ class TestNs1ProviderDynamic(TestCase):
self.assertTrue(extra)
# Mixed disabled in filters. Raise Ns1Exception
- monitors_for_mock.reset_mock()
- zones_retrieve_mock.reset_mock()
- records_retrieve_mock.reset_mock()
+ reset()
ns1_zone = {
'records': [{
"domain": "dyn.unit.tests",
@@ -1780,12 +2261,14 @@ class TestNs1Client(TestCase):
client = Ns1Client('dummy-key')
# No retry required, just calls and is returned
+ client.reset_caches()
zone_retrieve_mock.reset_mock()
zone_retrieve_mock.side_effect = ['foo']
self.assertEquals('foo', client.zones_retrieve('unit.tests'))
zone_retrieve_mock.assert_has_calls([call('unit.tests')])
# One retry required
+ client.reset_caches()
zone_retrieve_mock.reset_mock()
zone_retrieve_mock.side_effect = [
RateLimitException('boo', period=0),
@@ -1795,6 +2278,7 @@ class TestNs1Client(TestCase):
zone_retrieve_mock.assert_has_calls([call('unit.tests')])
# Two retries required
+ client.reset_caches()
zone_retrieve_mock.reset_mock()
zone_retrieve_mock.side_effect = [
RateLimitException('boo', period=0),
@@ -1804,6 +2288,7 @@ class TestNs1Client(TestCase):
zone_retrieve_mock.assert_has_calls([call('unit.tests')])
# Exhaust our retries
+ client.reset_caches()
zone_retrieve_mock.reset_mock()
zone_retrieve_mock.side_effect = [
RateLimitException('first', period=0),
@@ -2015,36 +2500,61 @@ class TestNs1Client(TestCase):
notifylists_delete_mock):
client = Ns1Client('dummy-key')
- notifylists_list_mock.reset_mock()
- notifylists_create_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
- notifylists_create_mock.side_effect = ['bar']
+ def reset():
+ notifylists_create_mock.reset_mock()
+ notifylists_delete_mock.reset_mock()
+ notifylists_list_mock.reset_mock()
+
+ reset()
+ notifylists_list_mock.side_effect = [{}]
+ expected = {
+ 'id': 'nl-id',
+ 'name': 'bar',
+ }
+ notifylists_create_mock.side_effect = [expected]
notify_list = [{
'config': {
'sourceid': 'foo',
},
'type': 'datafeed',
}]
- nl = client.notifylists_create(name='some name',
- notify_list=notify_list)
- self.assertEquals('bar', nl)
- notifylists_list_mock.assert_not_called()
+ got = client.notifylists_create(name='some name',
+ notify_list=notify_list)
+ self.assertEquals(expected, got)
+ notifylists_list_mock.assert_called_once()
notifylists_create_mock.assert_has_calls([
call({'name': 'some name', 'notify_list': notify_list})
])
notifylists_delete_mock.assert_not_called()
- notifylists_list_mock.reset_mock()
- notifylists_create_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ reset()
client.notifylists_delete('nlid')
notifylists_list_mock.assert_not_called()
notifylists_create_mock.assert_not_called()
notifylists_delete_mock.assert_has_calls([call('nlid')])
- notifylists_list_mock.reset_mock()
- notifylists_create_mock.reset_mock()
- notifylists_delete_mock.reset_mock()
+ # Delete again, this time with a cache item that needs to be cleaned out and
+ # another that needs to be ignored
+ reset()
+ client._notifylists_cache = {
+ 'another': {
+ 'id': 'notid',
+ 'name': 'another',
+ },
+ # This one comes 2nd on purpose
+ 'the-one': {
+ 'id': 'nlid',
+ 'name': 'the-one',
+ },
+ }
+ client.notifylists_delete('nlid')
+ notifylists_list_mock.assert_not_called()
+ notifylists_create_mock.assert_not_called()
+ notifylists_delete_mock.assert_has_calls([call('nlid')])
+ # Only another left
+ self.assertEquals(['another'], list(client._notifylists_cache.keys()))
+
+ reset()
expected = ['one', 'two', 'three']
notifylists_list_mock.side_effect = [expected]
nls = client.notifylists_list()
@@ -2052,3 +2562,150 @@ class TestNs1Client(TestCase):
notifylists_list_mock.assert_has_calls([call()])
notifylists_create_mock.assert_not_called()
notifylists_delete_mock.assert_not_called()
+
+ @patch('ns1.rest.records.Records.delete')
+ @patch('ns1.rest.records.Records.update')
+ @patch('ns1.rest.records.Records.create')
+ @patch('ns1.rest.records.Records.retrieve')
+ @patch('ns1.rest.zones.Zones.create')
+ @patch('ns1.rest.zones.Zones.delete')
+ @patch('ns1.rest.zones.Zones.retrieve')
+ def test_client_caching(self, zone_retrieve_mock, zone_delete_mock,
+ zone_create_mock, record_retrieve_mock,
+ record_create_mock, record_update_mock,
+ record_delete_mock):
+ client = Ns1Client('dummy-key')
+
+ def reset():
+ zone_retrieve_mock.reset_mock()
+ zone_delete_mock.reset_mock()
+ zone_create_mock.reset_mock()
+ record_retrieve_mock.reset_mock()
+ record_create_mock.reset_mock()
+ record_update_mock.reset_mock()
+ record_delete_mock.reset_mock()
+ # Testing caches so we don't reset those
+
+ # Initial zone get fetches and caches
+ reset()
+ zone_retrieve_mock.side_effect = ['foo']
+ self.assertEquals('foo', client.zones_retrieve('unit.tests'))
+ zone_retrieve_mock.assert_has_calls([call('unit.tests')])
+ self.assertEquals({
+ 'unit.tests': 'foo',
+ }, client._zones_cache)
+
+ # Subsequent zone get does not fetch and returns from cache
+ reset()
+ self.assertEquals('foo', client.zones_retrieve('unit.tests'))
+ zone_retrieve_mock.assert_not_called()
+
+ # Zone create stores in cache
+ reset()
+ zone_create_mock.side_effect = ['bar']
+ self.assertEquals('bar', client.zones_create('sub.unit.tests'))
+ zone_create_mock.assert_has_calls([call('sub.unit.tests')])
+ self.assertEquals({
+ 'sub.unit.tests': 'bar',
+ 'unit.tests': 'foo',
+ }, client._zones_cache)
+
+ # Initial record get fetches and caches
+ reset()
+ record_retrieve_mock.side_effect = ['baz']
+ self.assertEquals('baz', client.records_retrieve('unit.tests',
+ 'a.unit.tests', 'A'))
+ record_retrieve_mock.assert_has_calls([call('unit.tests',
+ 'a.unit.tests', 'A')])
+ self.assertEquals({
+ 'unit.tests': {
+ 'a.unit.tests': {
+ 'A': 'baz'
+ }
+ }
+ }, client._records_cache)
+
+ # Subsequent record get does not fetch and returns from cache
+ reset()
+ self.assertEquals('baz', client.records_retrieve('unit.tests',
+ 'a.unit.tests', 'A'))
+ record_retrieve_mock.assert_not_called()
+
+ # Record create stores in cache
+ reset()
+ record_create_mock.side_effect = ['boo']
+ self.assertEquals('boo', client.records_create('unit.tests',
+ 'aaaa.unit.tests',
+ 'AAAA', key='val'))
+ record_create_mock.assert_has_calls([call('unit.tests',
+ 'aaaa.unit.tests', 'AAAA',
+ key='val')])
+ self.assertEquals({
+ 'unit.tests': {
+ 'a.unit.tests': {
+ 'A': 'baz'
+ },
+ 'aaaa.unit.tests': {
+ 'AAAA': 'boo'
+ },
+ }
+ }, client._records_cache)
+
+ # Record delete removes from cache and removes zone
+ reset()
+ record_delete_mock.side_effect = [{}]
+ self.assertEquals({}, client.records_delete('unit.tests',
+ 'aaaa.unit.tests',
+ 'AAAA'))
+ record_delete_mock.assert_has_calls([call('unit.tests',
+ 'aaaa.unit.tests', 'AAAA')])
+ self.assertEquals({
+ 'unit.tests': {
+ 'a.unit.tests': {
+ 'A': 'baz'
+ },
+ 'aaaa.unit.tests': {},
+ }
+ }, client._records_cache)
+ self.assertEquals({
+ 'sub.unit.tests': 'bar',
+ }, client._zones_cache)
+
+ # Delete the other record, no zone this time, record should still go
+ # away
+ reset()
+ record_delete_mock.side_effect = [{}]
+ self.assertEquals({}, client.records_delete('unit.tests',
+ 'a.unit.tests', 'A'))
+ record_delete_mock.assert_has_calls([call('unit.tests', 'a.unit.tests',
+ 'A')])
+ self.assertEquals({
+ 'unit.tests': {
+ 'a.unit.tests': {},
+ 'aaaa.unit.tests': {},
+ }
+ }, client._records_cache)
+ self.assertEquals({
+ 'sub.unit.tests': 'bar',
+ }, client._zones_cache)
+
+ # Record update removes zone and caches result
+ record_update_mock.side_effect = ['done']
+ self.assertEquals('done', client.records_update('sub.unit.tests',
+ 'aaaa.sub.unit.tests',
+ 'AAAA', key='val'))
+ record_update_mock.assert_has_calls([call('sub.unit.tests',
+ 'aaaa.sub.unit.tests',
+ 'AAAA', key='val')])
+ self.assertEquals({
+ 'unit.tests': {
+ 'a.unit.tests': {},
+ 'aaaa.unit.tests': {},
+ },
+ 'sub.unit.tests': {
+ 'aaaa.sub.unit.tests': {
+ 'AAAA': 'done',
+ },
+ }
+ }, client._records_cache)
+ self.assertEquals({}, client._zones_cache)
diff --git a/tests/test_octodns_provider_powerdns.py b/tests/test_octodns_provider_powerdns.py
index fd877ef..92211d1 100644
--- a/tests/test_octodns_provider_powerdns.py
+++ b/tests/test_octodns_provider_powerdns.py
@@ -82,6 +82,20 @@ class TestPowerDnsProvider(TestCase):
provider._powerdns_version = None
self.assertNotEquals(provider.powerdns_version, [4, 1, 10])
+ # Test version detection with pre-releases
+ with requests_mock() as mock:
+ # Reset version, so detection will try again
+ provider._powerdns_version = None
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200, json={'version': "4.4.0-alpha1"})
+ self.assertEquals(provider.powerdns_version, [4, 4, 0])
+
+ provider._powerdns_version = None
+ mock.get('http://non.existent:8081/api/v1/servers/localhost',
+ status_code=200,
+ json={'version': "4.5.0-alpha0.435.master.gcb114252b"})
+ self.assertEquals(provider.powerdns_version, [4, 5, 0])
+
def test_provider_version_config(self):
provider = PowerDnsProvider('test', 'non.existent', 'api-key',
nameserver_values=['8.8.8.8.',
@@ -171,8 +185,8 @@ class TestPowerDnsProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
- expected_n = len(expected.records) - 2
- self.assertEquals(16, expected_n)
+ expected_n = len(expected.records) - 4
+ self.assertEquals(19, expected_n)
# No diffs == no changes
with requests_mock() as mock:
@@ -180,7 +194,7 @@ class TestPowerDnsProvider(TestCase):
zone = Zone('unit.tests.', [])
provider.populate(zone)
- self.assertEquals(16, len(zone.records))
+ self.assertEquals(19, len(zone.records))
changes = expected.changes(zone, provider)
self.assertEquals(0, len(changes))
@@ -277,7 +291,7 @@ class TestPowerDnsProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
- self.assertEquals(18, len(expected.records))
+ self.assertEquals(23, len(expected.records))
# A small change to a single record
with requests_mock() as mock:
diff --git a/tests/test_octodns_provider_route53.py b/tests/test_octodns_provider_route53.py
index a2b61e7..b3e5ba4 100644
--- a/tests/test_octodns_provider_route53.py
+++ b/tests/test_octodns_provider_route53.py
@@ -394,6 +394,139 @@ class TestRoute53Provider(TestCase):
return (provider, stubber)
+ def test_process_desired_zone(self):
+ provider, stubber = self._get_stubbed_fallback_auth_provider()
+
+ # No records, essentially a no-op
+ desired = Zone('unit.tests.', [])
+ got = provider._process_desired_zone(desired)
+ self.assertEquals(desired.records, got.records)
+
+ # Record without any geos
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '2.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ got = provider._process_desired_zone(desired)
+ self.assertEquals(desired.records, got.records)
+ self.assertEquals(1, len(list(got.records)[0].dynamic.rules))
+ self.assertFalse('geos' in list(got.records)[0].dynamic.rules[0].data)
+
+ # Record where all geos are supported
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }],
+ },
+ 'two': {
+ 'values': [{
+ 'value': '2.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'geos': ['EU', 'NA-US-OR'],
+ 'pool': 'two',
+ }, {
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ got = provider._process_desired_zone(desired)
+ self.assertEquals(2, len(list(got.records)[0].dynamic.rules))
+ self.assertEquals(['EU', 'NA-US-OR'],
+ list(got.records)[0].dynamic.rules[0].data['geos'])
+ self.assertFalse('geos' in list(got.records)[0].dynamic.rules[1].data)
+
+ # Record with NA-CA-* only rule which is removed
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }],
+ },
+ 'two': {
+ 'values': [{
+ 'value': '2.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'geos': ['NA-CA-BC'],
+ 'pool': 'two',
+ }, {
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ got = provider._process_desired_zone(desired)
+ self.assertEquals(1, len(list(got.records)[0].dynamic.rules))
+ self.assertFalse('geos' in list(got.records)[0].dynamic.rules[0].data)
+
+ # Record with NA-CA-* rule combined with other geos, filtered
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'a', {
+ 'ttl': 30,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': '1.2.3.4',
+ }],
+ },
+ 'two': {
+ 'values': [{
+ 'value': '2.2.3.4',
+ }],
+ },
+ },
+ 'rules': [{
+ 'geos': ['EU', 'NA-CA-NB', 'NA-US-OR'],
+ 'pool': 'two',
+ }, {
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ got = provider._process_desired_zone(desired)
+ self.assertEquals(2, len(list(got.records)[0].dynamic.rules))
+ self.assertEquals(['EU', 'NA-US-OR'],
+ list(got.records)[0].dynamic.rules[0].data['geos'])
+ self.assertFalse('geos' in list(got.records)[0].dynamic.rules[1].data)
+
def test_populate_with_fallback(self):
provider, stubber = self._get_stubbed_fallback_auth_provider()
@@ -1166,6 +1299,31 @@ class TestRoute53Provider(TestCase):
})
stubber.add_response('change_tags_for_resource', {})
+ health_check_config = {
+ 'EnableSNI': False,
+ 'FailureThreshold': 6,
+ 'FullyQualifiedDomainName': '4.2.3.4',
+ 'IPAddress': '4.2.3.4',
+ 'MeasureLatency': True,
+ 'Port': 8080,
+ 'RequestInterval': 10,
+ 'ResourcePath': '/_status',
+ 'Type': 'HTTP'
+ }
+ stubber.add_response('create_health_check', {
+ 'HealthCheck': {
+ 'Id': '43',
+ 'CallerReference': self.caller_ref,
+ 'HealthCheckConfig': health_check_config,
+ 'HealthCheckVersion': 1,
+ },
+ 'Location': 'http://url',
+ }, {
+ 'CallerReference': ANY,
+ 'HealthCheckConfig': health_check_config,
+ })
+ stubber.add_response('change_tags_for_resource', {})
+
record = Record.new(self.expected, '', {
'ttl': 61,
'type': 'A',
@@ -1191,6 +1349,11 @@ class TestRoute53Provider(TestCase):
# when allowed to create we do
id = provider.get_health_check_id(record, value, True)
self.assertEquals('42', id)
+
+ # when allowed to create and when host is None
+ record._octodns['healthcheck']['host'] = None
+ id = provider.get_health_check_id(record, value, True)
+ self.assertEquals('43', id)
stubber.assert_no_pending_responses()
# A CNAME style healthcheck, without a value
@@ -1962,6 +2125,163 @@ class TestRoute53Provider(TestCase):
self.assertEquals(1, len(extra))
stubber.assert_no_pending_responses()
+ def test_extra_change_dynamic_has_health_check_cname(self):
+ provider, stubber = self._get_stubbed_provider()
+
+ list_hosted_zones_resp = {
+ 'HostedZones': [{
+ 'Name': 'unit.tests.',
+ 'Id': 'z42',
+ 'CallerReference': 'abc',
+ }],
+ 'Marker': 'm',
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {})
+
+ # record with geo and no health check returns change
+ desired = Zone('unit.tests.', [])
+ record = Record.new(desired, 'cname', {
+ 'ttl': 30,
+ 'type': 'CNAME',
+ 'value': 'cname.unit.tests.',
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'value': 'one.cname.unit.tests.',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'one',
+ }],
+ },
+ })
+ desired.add_record(record)
+ list_resource_record_sets_resp = {
+ 'ResourceRecordSets': [{
+ # Not dynamic value and other name
+ 'Name': 'unit.tests.',
+ 'Type': 'CNAME',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': 'cname.unit.tests.',
+ }],
+ 'TTL': 61,
+ # All the non-matches have a different Id so we'll fail if they
+ # match
+ 'HealthCheckId': '33',
+ }, {
+ # Not dynamic value, matching name, other type
+ 'Name': 'cname.unit.tests.',
+ 'Type': 'AAAA',
+ 'ResourceRecords': [{
+ 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # default value pool
+ 'Name': '_octodns-default-value.cname.unit.tests.',
+ 'Type': 'CNAME',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': 'cname.unit.tests.',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # different record
+ 'Name': '_octodns-two-value.other.unit.tests.',
+ 'Type': 'CNAME',
+ 'GeoLocation': {
+ 'CountryCode': '*',
+ },
+ 'ResourceRecords': [{
+ 'Value': 'cname.unit.tests.',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # same everything, but different type
+ 'Name': '_octodns-one-value.cname.unit.tests.',
+ 'Type': 'AAAA',
+ 'ResourceRecords': [{
+ 'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # same everything, sub
+ 'Name': '_octodns-one-value.sub.cname.unit.tests.',
+ 'Type': 'CNAME',
+ 'ResourceRecords': [{
+ 'Value': 'cname.unit.tests.',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '33',
+ }, {
+ # match
+ 'Name': '_octodns-one-value.cname.unit.tests.',
+ 'Type': 'CNAME',
+ 'ResourceRecords': [{
+ 'Value': 'one.cname.unit.tests.',
+ }],
+ 'TTL': 61,
+ 'HealthCheckId': '42',
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ }
+ stubber.add_response('list_resource_record_sets',
+ list_resource_record_sets_resp,
+ {'HostedZoneId': 'z42'})
+
+ stubber.add_response('list_health_checks', {
+ 'HealthChecks': [{
+ 'Id': '42',
+ 'CallerReference': self.caller_ref,
+ 'HealthCheckConfig': {
+ 'Type': 'HTTPS',
+ 'FullyQualifiedDomainName': 'one.cname.unit.tests.',
+ 'ResourcePath': '/_dns',
+ 'Type': 'HTTPS',
+ 'Port': 443,
+ 'MeasureLatency': True,
+ 'RequestInterval': 10,
+ },
+ 'HealthCheckVersion': 2,
+ }],
+ 'IsTruncated': False,
+ 'MaxItems': '100',
+ 'Marker': '',
+ })
+ extra = provider._extra_changes(desired=desired, changes=[])
+ self.assertEquals(0, len(extra))
+ stubber.assert_no_pending_responses()
+
+ # change b/c of healthcheck path
+ record._octodns['healthcheck'] = {
+ 'path': '/_ready'
+ }
+ extra = provider._extra_changes(desired=desired, changes=[])
+ self.assertEquals(1, len(extra))
+ stubber.assert_no_pending_responses()
+
+ # no change b/c healthcheck host ignored for dynamic cname
+ record._octodns['healthcheck'] = {
+ 'host': 'foo.bar.io'
+ }
+ extra = provider._extra_changes(desired=desired, changes=[])
+ self.assertEquals(0, len(extra))
+ stubber.assert_no_pending_responses()
+
def _get_test_plan(self, max_changes):
provider = Route53Provider('test', 'abc', '123', max_changes)
diff --git a/tests/test_octodns_provider_transip.py b/tests/test_octodns_provider_transip.py
index f792085..234c95e 100644
--- a/tests/test_octodns_provider_transip.py
+++ b/tests/test_octodns_provider_transip.py
@@ -56,10 +56,11 @@ class MockDomainService(DomainService):
_dns_entries.extend(entries_for(name, record))
- # NS is not supported as a DNS Entry,
- # so it should cover the if statement
+ # Add a non-supported type
+ # so it triggers the "is supported" (transip.py:115) check and
+ # give 100% code coverage
_dns_entries.append(
- DnsEntry('@', '3600', 'NS', 'ns01.transip.nl.'))
+ DnsEntry('@', '3600', 'BOGUS', 'ns01.transip.nl.'))
self.mockupEntries = _dns_entries
@@ -222,7 +223,7 @@ N4OiVz1I3rbZGYa396lpxO6ku8yCglisL1yrSP6DdEUp66ntpKVd
provider._client = MockDomainService('unittest', self.bogus_key)
plan = provider.plan(_expected)
- self.assertEqual(12, plan.change_counts['Create'])
+ self.assertEqual(15, plan.change_counts['Create'])
self.assertEqual(0, plan.change_counts['Update'])
self.assertEqual(0, plan.change_counts['Delete'])
@@ -235,7 +236,7 @@ N4OiVz1I3rbZGYa396lpxO6ku8yCglisL1yrSP6DdEUp66ntpKVd
provider = TransipProvider('test', 'unittest', self.bogus_key)
provider._client = MockDomainService('unittest', self.bogus_key)
plan = provider.plan(_expected)
- self.assertEqual(12, len(plan.changes))
+ self.assertEqual(15, len(plan.changes))
changes = provider.apply(plan)
self.assertEqual(changes, len(plan.changes))
diff --git a/tests/test_octodns_provider_ultra.py b/tests/test_octodns_provider_ultra.py
index 43eac3c..a22a489 100644
--- a/tests/test_octodns_provider_ultra.py
+++ b/tests/test_octodns_provider_ultra.py
@@ -1,8 +1,11 @@
+from __future__ import unicode_literals
+
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from six import text_type
+from six.moves.urllib import parse
from unittest import TestCase
from json import load as json_load
@@ -55,7 +58,8 @@ class TestUltraProvider(TestCase):
self.assertEquals(1, mock.call_count)
expected_payload = "grant_type=password&username=user&"\
"password=rightpass"
- self.assertEquals(mock.last_request.text, expected_payload)
+ self.assertEquals(parse.parse_qs(mock.last_request.text),
+ parse.parse_qs(expected_payload))
def test_get_zones(self):
provider = _get_provider()
@@ -274,7 +278,7 @@ class TestUltraProvider(TestCase):
self.assertTrue(provider.populate(zone))
self.assertEquals('octodns1.test.', zone.name)
- self.assertEquals(11, len(zone.records))
+ self.assertEquals(12, len(zone.records))
self.assertEquals(4, mock.call_count)
def test_apply(self):
@@ -285,12 +289,12 @@ class TestUltraProvider(TestCase):
provider._request.side_effect = [
UltraNoZonesExistException('No Zones'),
None, # zone create
- ] + [None] * 13 # individual record creates
+ ] + [None] * 15 # individual record creates
# non-existent zone, create everything
plan = provider.plan(self.expected)
- self.assertEquals(13, len(plan.changes))
- self.assertEquals(13, provider.apply(plan))
+ self.assertEquals(15, len(plan.changes))
+ self.assertEquals(15, provider.apply(plan))
self.assertFalse(plan.exists)
provider._request.assert_has_calls([
@@ -320,7 +324,7 @@ class TestUltraProvider(TestCase):
'p=A/kinda+of/long/string+with+numb3rs']}),
], True)
# expected number of total calls
- self.assertEquals(15, provider._request.call_count)
+ self.assertEquals(17, provider._request.call_count)
# Create sample rrset payload to attempt to alter
page1 = json_load(open('tests/fixtures/ultra-records-page-1.json'))
@@ -352,8 +356,8 @@ class TestUltraProvider(TestCase):
}))
plan = provider.plan(wanted)
- self.assertEquals(10, len(plan.changes))
- self.assertEquals(10, provider.apply(plan))
+ self.assertEquals(11, len(plan.changes))
+ self.assertEquals(11, provider.apply(plan))
self.assertTrue(plan.exists)
provider._request.assert_has_calls([
@@ -492,6 +496,15 @@ class TestUltraProvider(TestCase):
Record.new(zone, 'txt',
{'ttl': 60, 'type': 'TXT',
'values': ['abc', 'def']})),
+
+ # ALIAS
+ ('', 'ALIAS',
+ '/v2/zones/unit.tests./rrsets/APEXALIAS/unit.tests.',
+ {'ttl': 60, 'rdata': ['target.unit.tests.']},
+ Record.new(zone, '',
+ {'ttl': 60, 'type': 'ALIAS',
+ 'value': 'target.unit.tests.'})),
+
):
# Validate path and payload based on record meet expectations
path, payload = provider._gen_data(expected_record)
diff --git a/tests/test_octodns_provider_yaml.py b/tests/test_octodns_provider_yaml.py
index f858c05..7e4f6f7 100644
--- a/tests/test_octodns_provider_yaml.py
+++ b/tests/test_octodns_provider_yaml.py
@@ -35,10 +35,10 @@ class TestYamlProvider(TestCase):
# without it we see everything
source.populate(zone)
- self.assertEquals(18, len(zone.records))
+ self.assertEquals(23, len(zone.records))
source.populate(dynamic_zone)
- self.assertEquals(5, len(dynamic_zone.records))
+ self.assertEquals(6, len(dynamic_zone.records))
# Assumption here is that a clean round-trip means that everything
# worked as expected, data that went in came back out and could be
@@ -58,21 +58,21 @@ class TestYamlProvider(TestCase):
# We add everything
plan = target.plan(zone)
- self.assertEquals(15, len([c for c in plan.changes
+ self.assertEquals(20, len([c for c in plan.changes
if isinstance(c, Create)]))
self.assertFalse(isfile(yaml_file))
# Now actually do it
- self.assertEquals(15, target.apply(plan))
+ self.assertEquals(20, target.apply(plan))
self.assertTrue(isfile(yaml_file))
# Dynamic plan
plan = target.plan(dynamic_zone)
- self.assertEquals(5, len([c for c in plan.changes
+ self.assertEquals(6, len([c for c in plan.changes
if isinstance(c, Create)]))
self.assertFalse(isfile(dynamic_yaml_file))
# Apply it
- self.assertEquals(5, target.apply(plan))
+ self.assertEquals(6, target.apply(plan))
self.assertTrue(isfile(dynamic_yaml_file))
# There should be no changes after the round trip
@@ -87,7 +87,7 @@ class TestYamlProvider(TestCase):
# A 2nd sync should still create everything
plan = target.plan(zone)
- self.assertEquals(15, len([c for c in plan.changes
+ self.assertEquals(20, len([c for c in plan.changes
if isinstance(c, Create)]))
with open(yaml_file) as fh:
@@ -106,9 +106,14 @@ class TestYamlProvider(TestCase):
self.assertTrue('values' in data.pop('naptr'))
self.assertTrue('values' in data.pop('sub'))
self.assertTrue('values' in data.pop('txt'))
+ self.assertTrue('values' in data.pop('loc'))
+ self.assertTrue('values' in data.pop('urlfwd'))
# these are stored as singular 'value'
+ self.assertTrue('value' in data.pop('_imap._tcp'))
+ self.assertTrue('value' in data.pop('_pop3._tcp'))
self.assertTrue('value' in data.pop('aaaa'))
self.assertTrue('value' in data.pop('cname'))
+ self.assertTrue('value' in data.pop('dname'))
self.assertTrue('value' in data.pop('included'))
self.assertTrue('value' in data.pop('ptr'))
self.assertTrue('value' in data.pop('spf'))
@@ -144,6 +149,10 @@ class TestYamlProvider(TestCase):
self.assertTrue('value' in dyna)
# self.assertTrue('dynamic' in dyna)
+ dyna = data.pop('pool-only-in-fallback')
+ self.assertTrue('value' in dyna)
+ # self.assertTrue('dynamic' in dyna)
+
# make sure nothing is left
self.assertEquals([], list(data.keys()))
@@ -206,18 +215,20 @@ class TestSplitYamlProvider(TestCase):
def test_zone_directory(self):
source = SplitYamlProvider(
- 'test', join(dirname(__file__), 'config/split'))
+ 'test', join(dirname(__file__), 'config/split'),
+ extension='.tst')
zone = Zone('unit.tests.', [])
self.assertEqual(
- join(dirname(__file__), 'config/split/unit.tests.'),
+ join(dirname(__file__), 'config/split', 'unit.tests.tst'),
source._zone_directory(zone))
def test_apply_handles_existing_zone_directory(self):
with TemporaryDirectory() as td:
- provider = SplitYamlProvider('test', join(td.dirname, 'config'))
- makedirs(join(td.dirname, 'config', 'does.exist.'))
+ provider = SplitYamlProvider('test', join(td.dirname, 'config'),
+ extension='.tst')
+ makedirs(join(td.dirname, 'config', 'does.exist.tst'))
zone = Zone('does.exist.', [])
self.assertTrue(isdir(provider._zone_directory(zone)))
@@ -226,7 +237,8 @@ class TestSplitYamlProvider(TestCase):
def test_provider(self):
source = SplitYamlProvider(
- 'test', join(dirname(__file__), 'config/split'))
+ 'test', join(dirname(__file__), 'config/split'),
+ extension='.tst')
zone = Zone('unit.tests.', [])
dynamic_zone = Zone('dynamic.tests.', [])
@@ -237,7 +249,7 @@ class TestSplitYamlProvider(TestCase):
# without it we see everything
source.populate(zone)
- self.assertEquals(18, len(zone.records))
+ self.assertEquals(20, len(zone.records))
source.populate(dynamic_zone)
self.assertEquals(5, len(dynamic_zone.records))
@@ -245,18 +257,19 @@ class TestSplitYamlProvider(TestCase):
with TemporaryDirectory() as td:
# Add some subdirs to make sure that it can create them
directory = join(td.dirname, 'sub', 'dir')
- zone_dir = join(directory, 'unit.tests.')
- dynamic_zone_dir = join(directory, 'dynamic.tests.')
- target = SplitYamlProvider('test', directory)
+ zone_dir = join(directory, 'unit.tests.tst')
+ dynamic_zone_dir = join(directory, 'dynamic.tests.tst')
+ target = SplitYamlProvider('test', directory,
+ extension='.tst')
# We add everything
plan = target.plan(zone)
- self.assertEquals(15, len([c for c in plan.changes
+ self.assertEquals(17, len([c for c in plan.changes
if isinstance(c, Create)]))
self.assertFalse(isdir(zone_dir))
# Now actually do it
- self.assertEquals(15, target.apply(plan))
+ self.assertEquals(17, target.apply(plan))
# Dynamic plan
plan = target.plan(dynamic_zone)
@@ -279,7 +292,7 @@ class TestSplitYamlProvider(TestCase):
# A 2nd sync should still create everything
plan = target.plan(zone)
- self.assertEquals(15, len([c for c in plan.changes
+ self.assertEquals(17, len([c for c in plan.changes
if isinstance(c, Create)]))
yaml_file = join(zone_dir, '$unit.tests.yaml')
@@ -294,7 +307,8 @@ class TestSplitYamlProvider(TestCase):
# These records are stored as plural "values." Check each file to
# ensure correctness.
- for record_name in ('_srv._tcp', 'mx', 'naptr', 'sub', 'txt'):
+ for record_name in ('_srv._tcp', 'mx', 'naptr', 'sub', 'txt',
+ 'urlfwd'):
yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
@@ -302,8 +316,8 @@ class TestSplitYamlProvider(TestCase):
self.assertTrue('values' in data.pop(record_name))
# These are stored as singular "value." Again, check each file.
- for record_name in ('aaaa', 'cname', 'included', 'ptr', 'spf',
- 'www.sub', 'www'):
+ for record_name in ('aaaa', 'cname', 'dname', 'included', 'ptr',
+ 'spf', 'www.sub', 'www'):
yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
@@ -334,7 +348,8 @@ class TestSplitYamlProvider(TestCase):
def test_empty(self):
source = SplitYamlProvider(
- 'test', join(dirname(__file__), 'config/split'))
+ 'test', join(dirname(__file__), 'config/split'),
+ extension='.tst')
zone = Zone('empty.', [])
@@ -344,7 +359,8 @@ class TestSplitYamlProvider(TestCase):
def test_unsorted(self):
source = SplitYamlProvider(
- 'test', join(dirname(__file__), 'config/split'))
+ 'test', join(dirname(__file__), 'config/split'),
+ extension='.tst')
zone = Zone('unordered.', [])
@@ -355,14 +371,15 @@ class TestSplitYamlProvider(TestCase):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'),
- enforce_order=False)
+ extension='.tst', enforce_order=False)
# no exception
source.populate(zone)
self.assertEqual(2, len(zone.records))
def test_subzone_handling(self):
source = SplitYamlProvider(
- 'test', join(dirname(__file__), 'config/split'))
+ 'test', join(dirname(__file__), 'config/split'),
+ extension='.tst')
# If we add `sub` as a sub-zone we'll reject `www.sub`
zone = Zone('unit.tests.', ['sub'])
@@ -386,8 +403,8 @@ class TestOverridingYamlProvider(TestCase):
# Load the base, should see the 5 records
base.populate(zone)
got = {r.name: r for r in zone.records}
- self.assertEquals(5, len(got))
- # We get the "dynamic" A from the bae config
+ self.assertEquals(6, len(got))
+ # We get the "dynamic" A from the base config
self.assertTrue('dynamic' in got['a'].data)
# No added
self.assertFalse('added' in got)
@@ -395,7 +412,7 @@ class TestOverridingYamlProvider(TestCase):
# Load the overrides, should replace one and add 1
override.populate(zone)
got = {r.name: r for r in zone.records}
- self.assertEquals(6, len(got))
+ self.assertEquals(7, len(got))
# 'a' was replaced with a generic record
self.assertEquals({
'ttl': 3600,
diff --git a/tests/test_octodns_record.py b/tests/test_octodns_record.py
index 08a3e7a..c848853 100644
--- a/tests/test_octodns_record.py
+++ b/tests/test_octodns_record.py
@@ -9,10 +9,11 @@ from six import text_type
from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, AliasRecord, CaaRecord, \
- CaaValue, CnameRecord, Create, Delete, GeoValue, MxRecord, MxValue, \
- NaptrRecord, NaptrValue, NsRecord, PtrRecord, Record, SshfpRecord, \
- SshfpValue, SpfRecord, SrvRecord, SrvValue, TxtRecord, Update, \
- ValidationError, _Dynamic, _DynamicPool, _DynamicRule
+ CaaValue, CnameRecord, DnameRecord, Create, Delete, GeoValue, LocRecord, \
+ LocValue, MxRecord, MxValue, NaptrRecord, NaptrValue, NsRecord, \
+ PtrRecord, Record, SshfpRecord, SshfpValue, SpfRecord, SrvRecord, \
+ SrvValue, TxtRecord, Update, UrlfwdRecord, UrlfwdValue, ValidationError, \
+ _Dynamic, _DynamicPool, _DynamicRule
from octodns.zone import Zone
from helpers import DynamicProvider, GeoProvider, SimpleProvider
@@ -55,6 +56,19 @@ class TestRecord(TestCase):
})
self.assertEquals(upper_record.value, lower_record.value)
+ def test_dname_lowering_value(self):
+ upper_record = DnameRecord(self.zone, 'DnameUppwerValue', {
+ 'ttl': 30,
+ 'type': 'DNAME',
+ 'value': 'GITHUB.COM',
+ })
+ lower_record = DnameRecord(self.zone, 'DnameLowerValue', {
+ 'ttl': 30,
+ 'type': 'DNAME',
+ 'value': 'github.com',
+ })
+ self.assertEquals(upper_record.value, lower_record.value)
+
def test_ptr_lowering_value(self):
upper_record = PtrRecord(self.zone, 'PtrUppwerValue', {
'ttl': 30,
@@ -245,11 +259,21 @@ class TestRecord(TestCase):
self.assertEquals(b_data, b.data)
def test_aaaa(self):
- a_values = ['2001:0db8:3c4d:0015:0000:0000:1a2f:1a2b',
- '2001:0db8:3c4d:0015:0000:0000:1a2f:1a3b']
- b_value = '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
+ a_values = ['2001:db8:3c4d:15::1a2f:1a2b',
+ '2001:db8:3c4d:15::1a2f:1a3b']
+ b_value = '2001:db8:3c4d:15::1a2f:1a4b'
self.assertMultipleValues(AaaaRecord, a_values, b_value)
+ # Specifically validate that we normalize IPv6 addresses
+ values = ['2001:db8:3c4d:15:0000:0000:1a2f:1a2b',
+ '2001:0db8:3c4d:0015::1a2f:1a3b']
+ data = {
+ 'ttl': 30,
+ 'values': values,
+ }
+ record = AaaaRecord(self.zone, 'aaaa', data)
+ self.assertEquals(a_values, record.values)
+
def assertSingleValue(self, _type, a_value, b_value):
a_data = {'ttl': 30, 'value': a_value}
a = _type(self.zone, 'a', a_data)
@@ -362,6 +386,102 @@ class TestRecord(TestCase):
self.assertSingleValue(CnameRecord, 'target.foo.com.',
'other.foo.com.')
+ def test_dname(self):
+ self.assertSingleValue(DnameRecord, 'target.foo.com.',
+ 'other.foo.com.')
+
+ def test_loc(self):
+ a_values = [{
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }]
+ a_data = {'ttl': 30, 'values': a_values}
+ a = LocRecord(self.zone, 'a', a_data)
+ self.assertEquals('a', a.name)
+ self.assertEquals('a.unit.tests.', a.fqdn)
+ self.assertEquals(30, a.ttl)
+ self.assertEquals(a_values[0]['lat_degrees'], a.values[0].lat_degrees)
+ self.assertEquals(a_values[0]['lat_minutes'], a.values[0].lat_minutes)
+ self.assertEquals(a_values[0]['lat_seconds'], a.values[0].lat_seconds)
+ self.assertEquals(a_values[0]['lat_direction'],
+ a.values[0].lat_direction)
+ self.assertEquals(a_values[0]['long_degrees'],
+ a.values[0].long_degrees)
+ self.assertEquals(a_values[0]['long_minutes'],
+ a.values[0].long_minutes)
+ self.assertEquals(a_values[0]['long_seconds'],
+ a.values[0].long_seconds)
+ self.assertEquals(a_values[0]['long_direction'],
+ a.values[0].long_direction)
+ self.assertEquals(a_values[0]['altitude'], a.values[0].altitude)
+ self.assertEquals(a_values[0]['size'], a.values[0].size)
+ self.assertEquals(a_values[0]['precision_horz'],
+ a.values[0].precision_horz)
+ self.assertEquals(a_values[0]['precision_vert'],
+ a.values[0].precision_vert)
+
+ b_value = {
+ 'lat_degrees': 32,
+ 'lat_minutes': 7,
+ 'lat_seconds': 19,
+ 'lat_direction': 'S',
+ 'long_degrees': 116,
+ 'long_minutes': 2,
+ 'long_seconds': 25,
+ 'long_direction': 'E',
+ 'altitude': 10,
+ 'size': 1,
+ 'precision_horz': 10000,
+ 'precision_vert': 10,
+ }
+ b_data = {'ttl': 30, 'value': b_value}
+ b = LocRecord(self.zone, 'b', b_data)
+ self.assertEquals(b_value['lat_degrees'], b.values[0].lat_degrees)
+ self.assertEquals(b_value['lat_minutes'], b.values[0].lat_minutes)
+ self.assertEquals(b_value['lat_seconds'], b.values[0].lat_seconds)
+ self.assertEquals(b_value['lat_direction'], b.values[0].lat_direction)
+ self.assertEquals(b_value['long_degrees'], b.values[0].long_degrees)
+ self.assertEquals(b_value['long_minutes'], b.values[0].long_minutes)
+ self.assertEquals(b_value['long_seconds'], b.values[0].long_seconds)
+ self.assertEquals(b_value['long_direction'],
+ b.values[0].long_direction)
+ self.assertEquals(b_value['altitude'], b.values[0].altitude)
+ self.assertEquals(b_value['size'], b.values[0].size)
+ self.assertEquals(b_value['precision_horz'],
+ b.values[0].precision_horz)
+ self.assertEquals(b_value['precision_vert'],
+ b.values[0].precision_vert)
+ self.assertEquals(b_data, b.data)
+
+ target = SimpleProvider()
+ # No changes with self
+ self.assertFalse(a.changes(a, target))
+ # Diff in lat_direction causes change
+ other = LocRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].lat_direction = 'N'
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+ # Diff in altitude causes change
+ other.values[0].altitude = a.values[0].altitude
+ other.values[0].altitude = -10
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+
+ # __repr__ doesn't blow up
+ a.__repr__()
+
def test_mx(self):
a_values = [{
'preference': 10,
@@ -774,6 +894,112 @@ class TestRecord(TestCase):
b_value = 'b other'
self.assertMultipleValues(TxtRecord, a_values, b_value)
+ def test_urlfwd(self):
+ a_values = [{
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }, {
+ 'path': '/target',
+ 'target': 'http://target',
+ 'code': 302,
+ 'masking': 2,
+ 'query': 0,
+ }]
+ a_data = {'ttl': 30, 'values': a_values}
+ a = UrlfwdRecord(self.zone, 'a', a_data)
+ self.assertEquals('a', a.name)
+ self.assertEquals('a.unit.tests.', a.fqdn)
+ self.assertEquals(30, a.ttl)
+ self.assertEquals(a_values[0]['path'], a.values[0].path)
+ self.assertEquals(a_values[0]['target'], a.values[0].target)
+ self.assertEquals(a_values[0]['code'], a.values[0].code)
+ self.assertEquals(a_values[0]['masking'], a.values[0].masking)
+ self.assertEquals(a_values[0]['query'], a.values[0].query)
+ self.assertEquals(a_values[1]['path'], a.values[1].path)
+ self.assertEquals(a_values[1]['target'], a.values[1].target)
+ self.assertEquals(a_values[1]['code'], a.values[1].code)
+ self.assertEquals(a_values[1]['masking'], a.values[1].masking)
+ self.assertEquals(a_values[1]['query'], a.values[1].query)
+ self.assertEquals(a_data, a.data)
+
+ b_value = {
+ 'path': '/',
+ 'target': 'http://location',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }
+ b_data = {'ttl': 30, 'value': b_value}
+ b = UrlfwdRecord(self.zone, 'b', b_data)
+ self.assertEquals(b_value['path'], b.values[0].path)
+ self.assertEquals(b_value['target'], b.values[0].target)
+ self.assertEquals(b_value['code'], b.values[0].code)
+ self.assertEquals(b_value['masking'], b.values[0].masking)
+ self.assertEquals(b_value['query'], b.values[0].query)
+ self.assertEquals(b_data, b.data)
+
+ target = SimpleProvider()
+ # No changes with self
+ self.assertFalse(a.changes(a, target))
+ # Diff in path causes change
+ other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].path = '/change'
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+ # Diff in target causes change
+ other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].target = 'http://target'
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+ # Diff in code causes change
+ other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].code = 302
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+ # Diff in masking causes change
+ other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].masking = 0
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+ # Diff in query causes change
+ other = UrlfwdRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
+ other.values[0].query = 1
+ change = a.changes(other, target)
+ self.assertEqual(change.existing, a)
+ self.assertEqual(change.new, other)
+
+ # hash
+ v = UrlfwdValue({
+ 'path': '/',
+ 'target': 'http://place',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ })
+ o = UrlfwdValue({
+ 'path': '/location',
+ 'target': 'http://redirect',
+ 'code': 302,
+ 'masking': 2,
+ 'query': 0,
+ })
+ values = set()
+ values.add(v)
+ self.assertTrue(v in values)
+ self.assertFalse(o in values)
+ values.add(o)
+ self.assertTrue(o in values)
+
+ # __repr__ doesn't blow up
+ a.__repr__()
+
def test_record_new(self):
txt = Record.new(self.zone, 'txt', {
'ttl': 44,
@@ -796,6 +1022,39 @@ class TestRecord(TestCase):
})
self.assertTrue('Unknown record type' in text_type(ctx.exception))
+ def test_record_copy(self):
+ a = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+
+ # Identical copy.
+ b = a.copy()
+ self.assertIsInstance(b, ARecord)
+ self.assertEquals('unit.tests.', b.zone.name)
+ self.assertEquals('a', b.name)
+ self.assertEquals('A', b._type)
+ self.assertEquals(['1.2.3.4'], b.values)
+
+ # Copy with another zone object.
+ c_zone = Zone('other.tests.', [])
+ c = a.copy(c_zone)
+ self.assertIsInstance(c, ARecord)
+ self.assertEquals('other.tests.', c.zone.name)
+ self.assertEquals('a', c.name)
+ self.assertEquals('A', c._type)
+ self.assertEquals(['1.2.3.4'], c.values)
+
+ # Record with no record type specified in data.
+ d_data = {
+ 'ttl': 600,
+ 'values': ['just a test']
+ }
+ d = TxtRecord(self.zone, 'txt', d_data)
+ d.copy()
+ self.assertEquals('TXT', d._type)
+
def test_change(self):
existing = Record.new(self.zone, 'txt', {
'ttl': 44,
@@ -872,17 +1131,33 @@ class TestRecord(TestCase):
}
})
self.assertEquals('/_ready', new.healthcheck_path)
- self.assertEquals('bleep.bloop', new.healthcheck_host)
+ self.assertEquals('bleep.bloop', new.healthcheck_host())
self.assertEquals('HTTP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port)
+ # empty host value in healthcheck
+ new = Record.new(self.zone, 'a', {
+ 'ttl': 44,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ 'octodns': {
+ 'healthcheck': {
+ 'path': '/_ready',
+ 'host': None,
+ 'protocol': 'HTTP',
+ 'port': 8080,
+ }
+ }
+ })
+ self.assertEquals('1.2.3.4', new.healthcheck_host(value="1.2.3.4"))
+
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
})
self.assertEquals('/_dns', new.healthcheck_path)
- self.assertEquals('a.unit.tests', new.healthcheck_host)
+ self.assertEquals('a.unit.tests', new.healthcheck_host())
self.assertEquals('HTTPS', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port)
@@ -901,7 +1176,7 @@ class TestRecord(TestCase):
}
})
self.assertIsNone(new.healthcheck_path)
- self.assertIsNone(new.healthcheck_host)
+ self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port)
@@ -916,7 +1191,7 @@ class TestRecord(TestCase):
}
})
self.assertIsNone(new.healthcheck_path)
- self.assertIsNone(new.healthcheck_host)
+ self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port)
@@ -1077,6 +1352,93 @@ class TestRecord(TestCase):
self.assertTrue(d >= d)
self.assertTrue(d <= d)
+ def test_loc_value(self):
+ a = LocValue({
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ })
+ b = LocValue({
+ 'lat_degrees': 32,
+ 'lat_minutes': 7,
+ 'lat_seconds': 19,
+ 'lat_direction': 'S',
+ 'long_degrees': 116,
+ 'long_minutes': 2,
+ 'long_seconds': 25,
+ 'long_direction': 'E',
+ 'altitude': 10,
+ 'size': 1,
+ 'precision_horz': 10000,
+ 'precision_vert': 10,
+ })
+ c = LocValue({
+ 'lat_degrees': 53,
+ 'lat_minutes': 14,
+ 'lat_seconds': 10,
+ 'lat_direction': 'N',
+ 'long_degrees': 2,
+ 'long_minutes': 18,
+ 'long_seconds': 26,
+ 'long_direction': 'W',
+ 'altitude': 10,
+ 'size': 1,
+ 'precision_horz': 1000,
+ 'precision_vert': 10,
+ })
+
+ self.assertEqual(a, a)
+ self.assertEqual(b, b)
+ self.assertEqual(c, c)
+
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, c)
+ self.assertNotEqual(b, a)
+ self.assertNotEqual(b, c)
+ self.assertNotEqual(c, a)
+ self.assertNotEqual(c, b)
+
+ self.assertTrue(a < b)
+ self.assertTrue(a < c)
+
+ self.assertTrue(b > a)
+ self.assertTrue(b < c)
+
+ self.assertTrue(c > a)
+ self.assertTrue(c > b)
+
+ self.assertTrue(a <= b)
+ self.assertTrue(a <= c)
+ self.assertTrue(a <= a)
+ self.assertTrue(a >= a)
+
+ self.assertTrue(b >= a)
+ self.assertTrue(b <= c)
+ self.assertTrue(b >= b)
+ self.assertTrue(b <= b)
+
+ self.assertTrue(c >= a)
+ self.assertTrue(c >= b)
+ self.assertTrue(c >= c)
+ self.assertTrue(c <= c)
+
+ # Hash
+ values = set()
+ values.add(a)
+ self.assertTrue(a in values)
+ self.assertFalse(b in values)
+ values.add(b)
+ self.assertTrue(b in values)
+
def test_mx_value(self):
a = MxValue({'preference': 0, 'priority': 'a', 'exchange': 'v',
'value': '1'})
@@ -1265,7 +1627,7 @@ class TestRecordValidation(TestCase):
self.assertTrue(reason.endswith('.unit.tests." is too long at 254'
' chars, max is 253'))
- # label length, DNS defins max as 63
+ # label length, DNS defines max as 63
with self.assertRaises(ValidationError) as ctx:
# The . will put this over the edge
name = 'x' * 64
@@ -1275,10 +1637,30 @@ class TestRecordValidation(TestCase):
'value': '1.2.3.4',
})
reason = ctx.exception.reasons[0]
- self.assertTrue(reason.startswith('invalid name, "xxxx'))
+ self.assertTrue(reason.startswith('invalid label, "xxxx'))
self.assertTrue(reason.endswith('xxx" is too long at 64'
' chars, max is 63'))
+ with self.assertRaises(ValidationError) as ctx:
+ name = 'foo.' + 'x' * 64 + '.bar'
+ Record.new(self.zone, name, {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+ reason = ctx.exception.reasons[0]
+ self.assertTrue(reason.startswith('invalid label, "xxxx'))
+ self.assertTrue(reason.endswith('xxx" is too long at 64'
+ ' chars, max is 63'))
+
+ # should not raise with dots
+ name = 'xxxxxxxx.' * 10
+ Record.new(self.zone, name, {
+ 'ttl': 300,
+ 'type': 'A',
+ 'value': '1.2.3.4',
+ })
+
# no ttl
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
@@ -1693,6 +2075,16 @@ class TestRecordValidation(TestCase):
'value': 'foo.bar.com.',
})
+ # root only
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'nope', {
+ 'type': 'ALIAS',
+ 'ttl': 600,
+ 'value': 'foo.bar.com.',
+ })
+ self.assertEquals(['non-root ALIAS not allowed'],
+ ctx.exception.reasons)
+
# missing value
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
@@ -1703,7 +2095,7 @@ class TestRecordValidation(TestCase):
# missing value
with self.assertRaises(ValidationError) as ctx:
- Record.new(self.zone, 'www', {
+ Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': None
@@ -1712,13 +2104,23 @@ class TestRecordValidation(TestCase):
# empty value
with self.assertRaises(ValidationError) as ctx:
- Record.new(self.zone, 'www', {
+ Record.new(self.zone, '', {
'type': 'ALIAS',
'ttl': 600,
'value': ''
})
self.assertEquals(['empty value'], ctx.exception.reasons)
+ # not a valid FQDN
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'ALIAS',
+ 'ttl': 600,
+ 'value': '__.',
+ })
+ self.assertEquals(['ALIAS value "__." is not a valid FQDN'],
+ ctx.exception.reasons)
+
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, '', {
@@ -1815,6 +2217,16 @@ class TestRecordValidation(TestCase):
})
self.assertEquals(['root CNAME not allowed'], ctx.exception.reasons)
+ # not a valid FQDN
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'www', {
+ 'type': 'CNAME',
+ 'ttl': 600,
+ 'value': '___.',
+ })
+ self.assertEquals(['CNAME value "___." is not a valid FQDN'],
+ ctx.exception.reasons)
+
# missing trailing .
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'www', {
@@ -1825,6 +2237,341 @@ class TestRecordValidation(TestCase):
self.assertEquals(['CNAME value "foo.bar.com" missing trailing .'],
ctx.exception.reasons)
+ def test_DNAME(self):
+ # A valid DNAME record.
+ Record.new(self.zone, 'sub', {
+ 'type': 'DNAME',
+ 'ttl': 600,
+ 'value': 'foo.bar.com.',
+ })
+
+ # A DNAME record can be present at the zone APEX.
+ Record.new(self.zone, '', {
+ 'type': 'DNAME',
+ 'ttl': 600,
+ 'value': 'foo.bar.com.',
+ })
+
+ # not a valid FQDN
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'www', {
+ 'type': 'DNAME',
+ 'ttl': 600,
+ 'value': '.',
+ })
+ self.assertEquals(['DNAME value "." is not a valid FQDN'],
+ ctx.exception.reasons)
+
+ # missing trailing .
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'www', {
+ 'type': 'DNAME',
+ 'ttl': 600,
+ 'value': 'foo.bar.com',
+ })
+ self.assertEquals(['DNAME value "foo.bar.com" missing trailing .'],
+ ctx.exception.reasons)
+
+ def test_LOC(self):
+ # doesn't blow up
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ # missing int key
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['missing lat_degrees'], ctx.exception.reasons)
+
+ # missing float key
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['missing lat_seconds'], ctx.exception.reasons)
+
+ # missing text key
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['missing lat_direction'], ctx.exception.reasons)
+
+ # invalid direction
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'U',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid direction for lat_direction "U"'],
+ ctx.exception.reasons)
+
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'N',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid direction for long_direction "N"'],
+ ctx.exception.reasons)
+
+ # invalid degrees
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 360,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid value for lat_degrees "360"'],
+ ctx.exception.reasons)
+
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 'nope',
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid lat_degrees "nope"'],
+ ctx.exception.reasons)
+
+ # invalid minutes
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 60,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid value for lat_minutes "60"'],
+ ctx.exception.reasons)
+
+ # invalid seconds
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 60,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid value for lat_seconds "60"'],
+ ctx.exception.reasons)
+
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 'nope',
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid lat_seconds "nope"'],
+ ctx.exception.reasons)
+
+ # invalid altitude
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': -666666,
+ 'size': 10,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid value for altitude "-666666"'],
+ ctx.exception.reasons)
+
+ # invalid size
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'LOC',
+ 'ttl': 600,
+ 'value': {
+ 'lat_degrees': 31,
+ 'lat_minutes': 58,
+ 'lat_seconds': 52.1,
+ 'lat_direction': 'S',
+ 'long_degrees': 115,
+ 'long_minutes': 49,
+ 'long_seconds': 11.7,
+ 'long_direction': 'E',
+ 'altitude': 20,
+ 'size': 99999999.99,
+ 'precision_horz': 10,
+ 'precision_vert': 2,
+ }
+ })
+
+ self.assertEquals(['invalid value for size "99999999.99"'],
+ ctx.exception.reasons)
+
def test_MX(self):
# doesn't blow up
Record.new(self.zone, '', {
@@ -1996,7 +2743,17 @@ class TestRecordValidation(TestCase):
'type': 'PTR',
'ttl': 600,
})
- self.assertEquals(['missing value'], ctx.exception.reasons)
+ self.assertEquals(['missing values'], ctx.exception.reasons)
+
+ # not a valid FQDN
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'PTR',
+ 'ttl': 600,
+ 'value': '_.',
+ })
+ self.assertEquals(['PTR value "_." is not a valid FQDN'],
+ ctx.exception.reasons)
# no trailing .
with self.assertRaises(ValidationError) as ctx:
@@ -2378,6 +3135,203 @@ class TestRecordValidation(TestCase):
# should be chunked values, with quoting
self.assertEquals(single.chunked_values, chunked.chunked_values)
+ def test_URLFWD(self):
+ # doesn't blow up
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'values': [{
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }, {
+ 'path': '/target',
+ 'target': 'http://target',
+ 'code': 302,
+ 'masking': 2,
+ 'query': 0,
+ }]
+ })
+
+ # missing path
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['missing path'], ctx.exception.reasons)
+
+ # missing target
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['missing target'], ctx.exception.reasons)
+
+ # missing code
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['missing code'], ctx.exception.reasons)
+
+ # invalid code
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 'nope',
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['invalid return code "nope"'],
+ ctx.exception.reasons)
+
+ # unrecognized code
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 3,
+ 'masking': 2,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['unrecognized return code "3"'],
+ ctx.exception.reasons)
+
+ # missing masking
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['missing masking'], ctx.exception.reasons)
+
+ # invalid masking
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 'nope',
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['invalid masking setting "nope"'],
+ ctx.exception.reasons)
+
+ # unrecognized masking
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 3,
+ 'query': 0,
+ }
+ })
+ self.assertEquals(['unrecognized masking setting "3"'],
+ ctx.exception.reasons)
+
+ # missing query
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ }
+ })
+ self.assertEquals(['missing query'], ctx.exception.reasons)
+
+ # invalid query
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 'nope',
+ }
+ })
+ self.assertEquals(['invalid query setting "nope"'],
+ ctx.exception.reasons)
+
+ # unrecognized query
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, '', {
+ 'type': 'URLFWD',
+ 'ttl': 600,
+ 'value': {
+ 'path': '/',
+ 'target': 'http://foo',
+ 'code': 301,
+ 'masking': 2,
+ 'query': 3,
+ }
+ })
+ self.assertEquals(['unrecognized query setting "3"'],
+ ctx.exception.reasons)
+
class TestDynamicRecords(TestCase):
zone = Zone('unit.tests.', [])
@@ -2388,6 +3342,7 @@ class TestDynamicRecords(TestCase):
'pools': {
'one': {
'values': [{
+ 'weight': 10,
'value': '3.3.3.3',
}],
},
@@ -2787,7 +3742,7 @@ class TestDynamicRecords(TestCase):
self.assertEquals(['pool "one" is missing values'],
ctx.exception.reasons)
- # pool valu not a dict
+ # pool value not a dict
a_data = {
'dynamic': {
'pools': {
@@ -2971,6 +3926,33 @@ class TestDynamicRecords(TestCase):
self.assertEquals(['invalid weight "foo" in pool "three" value 2'],
ctx.exception.reasons)
+ # single value with weight!=1
+ a_data = {
+ 'dynamic': {
+ 'pools': {
+ 'one': {
+ 'values': [{
+ 'weight': 12,
+ 'value': '6.6.6.6',
+ }],
+ },
+ },
+ 'rules': [{
+ 'pool': 'one',
+ }],
+ },
+ 'ttl': 60,
+ 'type': 'A',
+ 'values': [
+ '1.1.1.1',
+ '2.2.2.2',
+ ],
+ }
+ with self.assertRaises(ValidationError) as ctx:
+ Record.new(self.zone, 'bad', a_data)
+ self.assertEquals(['pool "one" has single value with weight!=1'],
+ ctx.exception.reasons)
+
# invalid fallback
a_data = {
'dynamic': {
diff --git a/tests/test_octodns_source_axfr.py b/tests/test_octodns_source_axfr.py
index bd25062..9aa80dd 100644
--- a/tests/test_octodns_source_axfr.py
+++ b/tests/test_octodns_source_axfr.py
@@ -9,6 +9,8 @@ import dns.zone
from dns.exception import DNSException
from mock import patch
+from os.path import exists
+from shutil import copyfile
from six import text_type
from unittest import TestCase
@@ -21,7 +23,7 @@ from octodns.record import ValidationError
class TestAxfrSource(TestCase):
source = AxfrSource('test', 'localhost')
- forward_zonefile = dns.zone.from_file('./tests/zones/unit.tests.',
+ forward_zonefile = dns.zone.from_file('./tests/zones/unit.tests.tst',
'unit.tests', relativize=False)
@patch('dns.zone.from_xfr')
@@ -34,7 +36,7 @@ class TestAxfrSource(TestCase):
]
self.source.populate(got)
- self.assertEquals(11, len(got.records))
+ self.assertEquals(15, len(got.records))
with self.assertRaises(AxfrSourceZoneTransferFailed) as ctx:
zone = Zone('unit.tests.', [])
@@ -44,18 +46,45 @@ class TestAxfrSource(TestCase):
class TestZoneFileSource(TestCase):
- source = ZoneFileSource('test', './tests/zones')
+ source = ZoneFileSource('test', './tests/zones', file_extension='.tst')
+
+ def test_zonefiles_with_extension(self):
+ source = ZoneFileSource('test', './tests/zones', '.extension')
+ # Load zonefiles with a specified file extension
+ valid = Zone('ext.unit.tests.', [])
+ source.populate(valid)
+ self.assertEquals(1, len(valid.records))
+
+ def test_zonefiles_without_extension(self):
+ # Windows doesn't let files end with a `.` so we add a .tst to them in
+ # the repo and then try and create the `.` version we need for the
+ # default case (no extension.)
+ copyfile('./tests/zones/unit.tests.tst', './tests/zones/unit.tests.')
+        # Unfortunately copyfile silently works and creates the file without
+ # the `.` so we have to check to see if it did that
+ if exists('./tests/zones/unit.tests'):
+ # It did so we need to skip this test, that means windows won't
+ # have full code coverage, but skipping the test is going out of
+            # our way enough for an os-specific/oddball case.
+ self.skipTest('Unable to create unit.tests. (ending with .) so '
+ 'skipping default filename testing.')
+
+ source = ZoneFileSource('test', './tests/zones')
+ # Load zonefiles without a specified file extension
+ valid = Zone('unit.tests.', [])
+ source.populate(valid)
+ self.assertEquals(15, len(valid.records))
def test_populate(self):
# Valid zone file in directory
valid = Zone('unit.tests.', [])
self.source.populate(valid)
- self.assertEquals(11, len(valid.records))
+ self.assertEquals(15, len(valid.records))
# 2nd populate does not read file again
again = Zone('unit.tests.', [])
self.source.populate(again)
- self.assertEquals(11, len(again.records))
+ self.assertEquals(15, len(again.records))
# bust the cache
del self.source._zone_records[valid.name]
diff --git a/tests/test_octodns_source_envvar.py b/tests/test_octodns_source_envvar.py
index 0714883..ac66a22 100644
--- a/tests/test_octodns_source_envvar.py
+++ b/tests/test_octodns_source_envvar.py
@@ -1,6 +1,6 @@
+from mock import patch
from six import text_type
from unittest import TestCase
-from unittest.mock import patch
from octodns.source.envvar import EnvVarSource
from octodns.source.envvar import EnvironmentVariableNotFoundException
diff --git a/tests/test_octodns_zone.py b/tests/test_octodns_zone.py
index 1d000f2..ddc2157 100644
--- a/tests/test_octodns_zone.py
+++ b/tests/test_octodns_zone.py
@@ -355,3 +355,59 @@ class TestZone(TestCase):
self.assertTrue(zone_missing.changes(zone_normal, provider))
self.assertFalse(zone_missing.changes(zone_included, provider))
+
+ def assertEqualsNameAndValues(self, a, b):
+ a = dict([(r.name, r.values[0]) for r in a])
+ b = dict([(r.name, r.values[0]) for r in b])
+ self.assertEquals(a, b)
+
+ def test_copy(self):
+ zone = Zone('unit.tests.', [])
+
+ a = ARecord(zone, 'a', {'ttl': 42, 'value': '1.1.1.1'})
+ zone.add_record(a)
+ b = ARecord(zone, 'b', {'ttl': 42, 'value': '1.1.1.2'})
+ zone.add_record(b)
+
+ # Sanity check
+ self.assertEqualsNameAndValues(set((a, b)), zone.records)
+
+ copy = zone.copy()
+ # We have an origin set and it is the source/original zone
+ self.assertEquals(zone, copy._origin)
+ # Our records are zone's records to start (references)
+ self.assertEqualsNameAndValues(zone.records, copy.records)
+
+ # If we try and change something that's already there we realize and
+ # then get an error about a duplicate
+ b_prime = ARecord(zone, 'b', {'ttl': 42, 'value': '1.1.1.3'})
+ with self.assertRaises(DuplicateRecordException):
+ copy.add_record(b_prime)
+ self.assertIsNone(copy._origin)
+ # Unchanged, straight copies
+ self.assertEqualsNameAndValues(zone.records, copy.records)
+
+ # If we add with replace things will be realized and the record will
+ # have changed
+ copy = zone.copy()
+ copy.add_record(b_prime, replace=True)
+ self.assertIsNone(copy._origin)
+ self.assertEqualsNameAndValues(set((a, b_prime)), copy.records)
+
+        # If we add another record, things are realized and it has been added
+ copy = zone.copy()
+ c = ARecord(zone, 'c', {'ttl': 42, 'value': '1.1.1.3'})
+ copy.add_record(c)
+ self.assertEqualsNameAndValues(set((a, b, c)), copy.records)
+
+        # If we remove a record, things are realized and it has been removed
+ copy = zone.copy()
+ copy.remove_record(a)
+ self.assertEqualsNameAndValues(set((b,)), copy.records)
+
+ # Re-realizing is a noop
+ copy = zone.copy()
+ # Happens the first time
+ self.assertTrue(copy.hydrate())
+ # Doesn't the second
+ self.assertFalse(copy.hydrate())
diff --git a/tests/zones/ext.unit.tests.extension b/tests/zones/ext.unit.tests.extension
new file mode 100644
index 0000000..2ed7ac6
--- /dev/null
+++ b/tests/zones/ext.unit.tests.extension
@@ -0,0 +1,12 @@
+$ORIGIN ext.unit.tests.
+@ 3600 IN SOA ns1.ext.unit.tests. root.ext.unit.tests. (
+ 2018071501 ; Serial
+ 3600 ; Refresh (1 hour)
+ 600 ; Retry (10 minutes)
+ 604800 ; Expire (1 week)
+ 3600 ; NXDOMAIN ttl (1 hour)
+ )
+
+; NS Records
+@ 3600 IN NS ns1.ext.unit.tests.
+@ 3600 IN NS ns2.ext.unit.tests.
diff --git a/tests/zones/invalid.records. b/tests/zones/invalid.records.tst
similarity index 100%
rename from tests/zones/invalid.records.
rename to tests/zones/invalid.records.tst
diff --git a/tests/zones/invalid.zone. b/tests/zones/invalid.zone.tst
similarity index 100%
rename from tests/zones/invalid.zone.
rename to tests/zones/invalid.zone.tst
diff --git a/tests/zones/unit.tests. b/tests/zones/unit.tests.tst
similarity index 79%
rename from tests/zones/unit.tests.
rename to tests/zones/unit.tests.tst
index 0305e05..b916b81 100644
--- a/tests/zones/unit.tests.
+++ b/tests/zones/unit.tests.tst
@@ -13,9 +13,16 @@ $ORIGIN unit.tests.
under 3600 IN NS ns1.unit.tests.
under 3600 IN NS ns2.unit.tests.
+; CAA Records
+caa 1800 IN CAA 0 issue "ca.unit.tests"
+caa 1800 IN CAA 0 iodef "mailto:admin@unit.tests"
+
; SRV Records
_srv._tcp 600 IN SRV 10 20 30 foo-1.unit.tests.
_srv._tcp 600 IN SRV 10 20 30 foo-2.unit.tests.
+; NULL SRV Records
+_pop3._tcp 600 IN SRV 0 0 0 .
+_imap._tcp 600 IN SRV 0 0 0 .
; TXT Records
txt 600 IN TXT "Bah bah black sheep"
@@ -28,6 +35,10 @@ mx 300 IN MX 20 smtp-2.unit.tests.
mx 300 IN MX 30 smtp-3.unit.tests.
mx 300 IN MX 40 smtp-1.unit.tests.
+; LOC Records
+loc 300 IN LOC 31 58 52.1 S 115 49 11.7 E 20m 10m 10m 2m
+loc 300 IN LOC 53 14 10 N 2 18 26 W 20m 10m 1000m 2m
+
; A Records
@ 300 IN A 1.2.3.4
@ 300 IN A 1.2.3.5