Browse Source

Merge branch 'master' into gcore-provider

pull/681/head
Yaroshevich Denis 5 years ago
committed by GitHub
parent
commit
2a7b36ade4
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 5132 additions and 240 deletions
  1. +34
    -1
      CHANGELOG.md
  2. +19
    -10
      README.md
  3. +1
    -2
      docs/records.md
  4. +1
    -1
      octodns/__init__.py
  5. +72
    -13
      octodns/manager.py
  6. +6
    -0
      octodns/processor/__init__.py
  7. +30
    -0
      octodns/processor/base.py
  8. +44
    -0
      octodns/processor/filter.py
  9. +103
    -0
      octodns/processor/ownership.py
  10. +845
    -70
      octodns/provider/azuredns.py
  11. +4
    -1
      octodns/provider/base.py
  12. +3
    -3
      octodns/provider/dnsimple.py
  13. +6
    -6
      octodns/provider/dyn.py
  14. +1
    -1
      octodns/provider/easydns.py
  15. +339
    -0
      octodns/provider/hetzner.py
  16. +76
    -36
      octodns/provider/ns1.py
  17. +1
    -1
      octodns/provider/plan.py
  18. +7
    -2
      octodns/provider/route53.py
  19. +2
    -2
      octodns/provider/transip.py
  20. +25
    -8
      octodns/provider/ultra.py
  21. +24
    -11
      octodns/record/__init__.py
  22. +5
    -3
      requirements.txt
  23. +4
    -0
      script/coverage
  24. +4
    -0
      script/test
  25. +23
    -0
      tests/config/dynamic.tests.yaml
  26. +6
    -0
      tests/config/plan-output-filehandle.yaml
  27. +23
    -0
      tests/config/processors-missing-class.yaml
  28. +25
    -0
      tests/config/processors-wants-config.yaml
  29. +33
    -0
      tests/config/processors.yaml
  30. +17
    -0
      tests/config/unknown-processor.yaml
  31. +223
    -0
      tests/fixtures/hetzner-records.json
  32. +43
    -0
      tests/fixtures/hetzner-zones.json
  33. +9
    -0
      tests/fixtures/ultra-records-page-2.json
  34. +30
    -0
      tests/helpers.py
  35. +149
    -9
      tests/test_octodns_manager.py
  36. +90
    -0
      tests/test_octodns_processor_filter.py
  37. +146
    -0
      tests/test_octodns_processor_ownership.py
  38. +1718
    -29
      tests/test_octodns_provider_azuredns.py
  39. +65
    -2
      tests/test_octodns_provider_base.py
  40. +341
    -0
      tests/test_octodns_provider_hetzner.py
  41. +272
    -11
      tests/test_octodns_provider_ns1.py
  42. +187
    -0
      tests/test_octodns_provider_route53.py
  43. +6
    -5
      tests/test_octodns_provider_transip.py
  44. +12
    -3
      tests/test_octodns_provider_ultra.py
  45. +9
    -5
      tests/test_octodns_provider_yaml.py
  46. +49
    -5
      tests/test_octodns_record.py

+ 34
- 1
CHANGELOG.md View File

@ -1,6 +1,39 @@
## v0.9.13 - 2021-..-.. -
#### Noteworthy changes
* Alpha support for Processors has been added. Processors allow for hooking
into the source, target, and planning process to make nearly arbitrary changes
to data. See the [octodns/processor/](/octodns/processor) directory for
examples. The change has been designed to have no impact on the process
unless the `processors` key is present in zone configs.
## v0.9.12 - 2021-04-30 - Enough time has passed
#### Noteworthy changes
* Formal Python 2.7 support removed, deps and tooling were becoming
unmaintainable
* octodns/octodns move, from github/octodns, more to come
#### Stuff
* ZoneFileSource supports specifying an extension & no files end in . to better
support Windows
* LOC record type support added
* Support for pre-release versions of PowerDNS
* PowerDNS delete before create which allows A <-> CNAME etc.
* Improved validation of fqdn's in ALIAS, CNAME, etc.
* Transip support for NS records
* Support for sending plan output to a file
* DNSimple uses zone api rather than domain to support non-registered stuff,
e.g. reverse zones.
* Support for fallback-only dynamic pools and related fixes to NS1 provider
* Initial Hetzner provider
## v0.9.11 - 2020-11-05 - We still don't know edition ## v0.9.11 - 2020-11-05 - We still don't know edition
#### Noteworthy changtes
#### Noteworthy changes
* ALIAS records only allowed at the root of zones - see `lenient` in record docs * ALIAS records only allowed at the root of zones - see `lenient` in record docs
for work-arounds if you really need them. for work-arounds if you really need them.


+ 19
- 10
README.md View File

@ -1,4 +1,4 @@
<img src="https://raw.githubusercontent.com/octodns/octodns/master/docs/logos/octodns-logo.png?" height=251 width=404>
<img src="https://raw.githubusercontent.com/octodns/octodns/master/docs/logos/octodns-logo.png?" alt="OctoDNS Logo" height=251 width=404>
## DNS as code - Tools for managing DNS across multiple providers ## DNS as code - Tools for managing DNS across multiple providers
@ -38,7 +38,7 @@ It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
Running through the following commands will install the latest release of OctoDNS and set up a place for your config files to live. To determine if provider specific requirements are necessary see the [Supported providers table](#supported-providers) below. Running through the following commands will install the latest release of OctoDNS and set up a place for your config files to live. To determine if provider specific requirements are necessary see the [Supported providers table](#supported-providers) below.
```
```shell
$ mkdir dns $ mkdir dns
$ cd dns $ cd dns
$ virtualenv env $ virtualenv env
@ -48,6 +48,14 @@ $ pip install octodns <provider-specific-requirements>
$ mkdir config $ mkdir config
``` ```
#### Installing a specific commit SHA
If you'd like to install a version that has not yet been released in a repeatable/safe manner you can do the following. In general octoDNS is fairly stable in between releases thanks to the plan and apply process, but care should be taken regardless.
```shell
$ pip install -e git+https://git@github.com/github/octodns.git@<SHA>#egg=octodns
```
### Config ### Config
We start by creating a config file to tell OctoDNS about our providers and the zone(s) we want it to manage. Below we're setting up a `YamlProvider` to source records from our config files and both a `Route53Provider` and `DynProvider` to serve as the targets for those records. You can have any number of zones set up and any number of sources of data and targets for records for each. You can also have multiple config files, that make use of separate accounts and each manage a distinct set of zones. A good example of this might be `./config/staging.yaml` & `./config/production.yaml`. We'll focus on a `config/production.yaml`. We start by creating a config file to tell OctoDNS about our providers and the zone(s) we want it to manage. Below we're setting up a `YamlProvider` to source records from our config files and both a `Route53Provider` and `DynProvider` to serve as the targets for those records. You can have any number of zones set up and any number of sources of data and targets for records for each. You can also have multiple config files, that make use of separate accounts and each manage a distinct set of zones. A good example of this might be `./config/staging.yaml` & `./config/production.yaml`. We'll focus on a `config/production.yaml`.
@ -113,7 +121,7 @@ Further information can be found in [Records Documentation](/docs/records.md).
We're ready to do a dry-run with our new setup to see what changes it would make. Since we're pretending here we'll act like there are no existing records for `example.com.` in our accounts on either provider. We're ready to do a dry-run with our new setup to see what changes it would make. Since we're pretending here we'll act like there are no existing records for `example.com.` in our accounts on either provider.
```
```shell
$ octodns-sync --config-file=./config/production.yaml $ octodns-sync --config-file=./config/production.yaml
... ...
******************************************************************************** ********************************************************************************
@ -137,7 +145,7 @@ There will be other logging information presented on the screen, but successful
Now it's time to tell OctoDNS to make things happen. We'll invoke it again with the same options and add a `--doit` on the end to tell it this time we actually want it to try and make the specified changes. Now it's time to tell OctoDNS to make things happen. We'll invoke it again with the same options and add a `--doit` on the end to tell it this time we actually want it to try and make the specified changes.
```
```shell
$ octodns-sync --config-file=./config/production.yaml --doit $ octodns-sync --config-file=./config/production.yaml --doit
... ...
``` ```
@ -150,17 +158,17 @@ In the above case we manually ran OctoDNS from the command line. That works and
The first step is to create a PR with your changes. The first step is to create a PR with your changes.
![](/docs/assets/pr.png)
![GitHub user interface of a pull request](/docs/assets/pr.png)
Assuming the code tests and config validation statuses are green the next step is to do a noop deploy and verify that the changes OctoDNS plans to make are the ones you expect. Assuming the code tests and config validation statuses are green the next step is to do a noop deploy and verify that the changes OctoDNS plans to make are the ones you expect.
![](/docs/assets/noop.png)
![Output of a noop deployment command](/docs/assets/noop.png)
After that comes a set of reviews. One from a teammate who should have full context on what you're trying to accomplish and visibility in to the changes you're making to do it. The other is from a member of the team here at GitHub that owns DNS, mostly as a sanity check and to make sure that best practices are being followed. As much of that as possible is baked into `octodns-validate`. After that comes a set of reviews. One from a teammate who should have full context on what you're trying to accomplish and visibility in to the changes you're making to do it. The other is from a member of the team here at GitHub that owns DNS, mostly as a sanity check and to make sure that best practices are being followed. As much of that as possible is baked into `octodns-validate`.
After the reviews it's time to branch deploy the change. After the reviews it's time to branch deploy the change.
![](/docs/assets/deploy.png)
![Output of a deployment command](/docs/assets/deploy.png)
If that goes smoothly, you again see the expected changes, and verify them with `dig` and/or `octodns-report` you're good to hit the merge button. If there are problems you can quickly do a `.deploy dns/master` to go back to the previous state. If that goes smoothly, you again see the expected changes, and verify them with `dig` and/or `octodns-report` you're good to hit the merge button. If there are problems you can quickly do a `.deploy dns/master` to go back to the previous state.
@ -168,7 +176,7 @@ If that goes smoothly, you again see the expected changes, and verify them with
Very few situations will involve starting with a blank slate which is why there's tooling built in to pull existing data out of providers into a matching config file. Very few situations will involve starting with a blank slate which is why there's tooling built in to pull existing data out of providers into a matching config file.
```
```shell
$ octodns-dump --config-file=config/production.yaml --output-dir=tmp/ example.com. route53 $ octodns-dump --config-file=config/production.yaml --output-dir=tmp/ example.com. route53
2017-03-15T13:33:34 INFO Manager __init__: config_file=tmp/production.yaml 2017-03-15T13:33:34 INFO Manager __init__: config_file=tmp/production.yaml
2017-03-15T13:33:34 INFO Manager dump: zone=example.com., sources=('route53',) 2017-03-15T13:33:34 INFO Manager dump: zone=example.com., sources=('route53',)
@ -184,7 +192,7 @@ The above command pulled the existing data out of Route53 and placed the results
| Provider | Requirements | Record Support | Dynamic | Notes | | Provider | Requirements | Record Support | Dynamic | Notes |
|--|--|--|--|--| |--|--|--|--|--|
| [AzureProvider](/octodns/provider/azuredns.py) | azure-mgmt-dns | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | No | |
| [AzureProvider](/octodns/provider/azuredns.py) | azure-identity, azure-mgmt-dns, azure-mgmt-trafficmanager | A, AAAA, CAA, CNAME, MX, NS, PTR, SRV, TXT | Alpha (CNAMEs and partial A/AAAA) | |
| [Akamai](/octodns/provider/edgedns.py) | edgegrid-python | A, AAAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT | No | | | [Akamai](/octodns/provider/edgedns.py) | edgegrid-python | A, AAAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
| [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted | | [CloudflareProvider](/octodns/provider/cloudflare.py) | | A, AAAA, ALIAS, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
| [ConstellixProvider](/octodns/provider/constellix.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted | | [ConstellixProvider](/octodns/provider/constellix.py) | | A, AAAA, ALIAS (ANAME), CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | CAA tags restricted |
@ -198,6 +206,7 @@ The above command pulled the existing data out of Route53 and placed the results
| [GandiProvider](/octodns/provider/gandi.py) | | A, AAAA, ALIAS, CAA, CNAME, DNAME, MX, NS, PTR, SPF, SRV, SSHFP, TXT | No | | | [GandiProvider](/octodns/provider/gandi.py) | | A, AAAA, ALIAS, CAA, CNAME, DNAME, MX, NS, PTR, SPF, SRV, SSHFP, TXT | No | |
| [GCoreProvider](/octodns/provider/gcore.py) | | A, AAAA | No | | | [GCoreProvider](/octodns/provider/gcore.py) | | A, AAAA | No | |
| [GoogleCloudProvider](/octodns/provider/googlecloud.py) | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | | | [GoogleCloudProvider](/octodns/provider/googlecloud.py) | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
| [HetznerProvider](/octodns/provider/hetzner.py) | | A, AAAA, CAA, CNAME, MX, NS, SRV, TXT | No | |
| [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | | | [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | Missing `NA` geo target | | [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | Missing `NA` geo target |
| [OVH](/octodns/provider/ovh.py) | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | | | [OVH](/octodns/provider/ovh.py) | ovh | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, SSHFP, TXT, DKIM | No | |
@ -205,7 +214,7 @@ The above command pulled the existing data out of Route53 and placed the results
| [Rackspace](/octodns/provider/rackspace.py) | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | | | [Rackspace](/octodns/provider/rackspace.py) | | A, AAAA, ALIAS, CNAME, MX, NS, PTR, SPF, TXT | No | |
| [Route53](/octodns/provider/route53.py) | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header | | [Route53](/octodns/provider/route53.py) | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header |
| [Selectel](/octodns/provider/selectel.py) | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | | | [Selectel](/octodns/provider/selectel.py) | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | |
| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, SRV, SPF, TXT, SSHFP, CAA | No | |
| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, NS, SRV, SPF, TXT, SSHFP, CAA | No | |
| [UltraDns](/octodns/provider/ultra.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | | | [UltraDns](/octodns/provider/ultra.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | |
| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | read-only | | [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, LOC, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only | | [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |


+ 1
- 2
docs/records.md View File

@ -114,8 +114,7 @@ If you'd like to enable lenience for a whole zone you can do so with the followi
```yaml ```yaml
non-compliant-zone.com.: non-compliant-zone.com.:
octodns:
lenient: true
lenient: true
sources: sources:
- route53 - route53
targets: targets:


+ 1
- 1
octodns/__init__.py View File

@ -3,4 +3,4 @@
from __future__ import absolute_import, division, print_function, \ from __future__ import absolute_import, division, print_function, \
unicode_literals unicode_literals
__VERSION__ = '0.9.11'
__VERSION__ = '0.9.12'

+ 72
- 13
octodns/manager.py View File

@ -9,6 +9,7 @@ from concurrent.futures import ThreadPoolExecutor
from importlib import import_module from importlib import import_module
from os import environ from os import environ
from six import text_type from six import text_type
from sys import stdout
import logging import logging
from .provider.base import BaseProvider from .provider.base import BaseProvider
@ -121,6 +122,25 @@ class Manager(object):
raise ManagerException('Incorrect provider config for {}' raise ManagerException('Incorrect provider config for {}'
.format(provider_name)) .format(provider_name))
self.processors = {}
for processor_name, processor_config in \
self.config.get('processors', {}).items():
try:
_class = processor_config.pop('class')
except KeyError:
self.log.exception('Invalid processor class')
raise ManagerException('Processor {} is missing class'
.format(processor_name))
_class = self._get_named_class('processor', _class)
kwargs = self._build_kwargs(processor_config)
try:
self.processors[processor_name] = _class(processor_name,
**kwargs)
except TypeError:
self.log.exception('Invalid processor config')
raise ManagerException('Incorrect processor config for {}'
.format(processor_name))
zone_tree = {} zone_tree = {}
# sort by reversed strings so that parent zones always come first # sort by reversed strings so that parent zones always come first
for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]): for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
@ -222,8 +242,8 @@ class Manager(object):
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names) self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names) return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets, desired=None,
lenient=False):
def _populate_and_plan(self, zone_name, processors, sources, targets,
desired=None, lenient=False):
self.log.debug('sync: populating, zone=%s, lenient=%s', self.log.debug('sync: populating, zone=%s, lenient=%s',
zone_name, lenient) zone_name, lenient)
@ -236,7 +256,6 @@ class Manager(object):
for _, records in desired._records.items(): for _, records in desired._records.items():
for record in records: for record in records:
zone.add_record(record.copy(zone=zone), lenient=lenient) zone.add_record(record.copy(zone=zone), lenient=lenient)
else: else:
for source in sources: for source in sources:
try: try:
@ -244,10 +263,13 @@ class Manager(object):
except TypeError as e: except TypeError as e:
if "keyword argument 'lenient'" not in text_type(e): if "keyword argument 'lenient'" not in text_type(e):
raise raise
self.log.warn(': provider %s does not accept lenient '
self.log.warn('provider %s does not accept lenient '
'param', source.__class__.__name__) 'param', source.__class__.__name__)
source.populate(zone) source.populate(zone)
for processor in processors:
zone = processor.process_source_zone(zone, sources=sources)
self.log.debug('sync: planning, zone=%s', zone_name) self.log.debug('sync: planning, zone=%s', zone_name)
plans = [] plans = []
@ -259,7 +281,18 @@ class Manager(object):
'value': 'provider={}'.format(target.id) 'value': 'provider={}'.format(target.id)
}) })
zone.add_record(meta, replace=True) zone.add_record(meta, replace=True)
plan = target.plan(zone)
try:
plan = target.plan(zone, processors=processors)
except TypeError as e:
if "keyword argument 'processors'" not in text_type(e):
raise
self.log.warn('provider.plan %s does not accept processors '
'param', target.__class__.__name__)
plan = target.plan(zone)
for processor in processors:
plan = processor.process_plan(plan, sources=sources,
target=target)
if plan: if plan:
plans.append((target, plan)) plans.append((target, plan))
@ -267,16 +300,19 @@ class Manager(object):
return plans, zone return plans, zone
def sync(self, eligible_zones=[], eligible_sources=[], eligible_targets=[], def sync(self, eligible_zones=[], eligible_sources=[], eligible_targets=[],
dry_run=True, force=False):
self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
'dry_run=%s, force=%s', eligible_zones, eligible_targets,
dry_run, force)
dry_run=True, force=False, plan_output_fh=stdout):
self.log.info(
'sync: eligible_zones=%s, eligible_targets=%s, dry_run=%s, '
'force=%s, plan_output_fh=%s',
eligible_zones, eligible_targets, dry_run, force,
getattr(plan_output_fh, 'name', plan_output_fh.__class__.__name__))
zones = self.config['zones'].items() zones = self.config['zones'].items()
if eligible_zones: if eligible_zones:
zones = [z for z in zones if z[0] in eligible_zones] zones = [z for z in zones if z[0] in eligible_zones]
aliased_zones = {}
aliased_zones = {}
futures = [] futures = []
for zone_name, config in zones: for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name) self.log.info('sync: zone=%s', zone_name)
@ -315,6 +351,8 @@ class Manager(object):
raise ManagerException('Zone {} is missing targets' raise ManagerException('Zone {} is missing targets'
.format(zone_name)) .format(zone_name))
processors = config.get('processors', [])
if (eligible_sources and not if (eligible_sources and not
[s for s in sources if s in eligible_sources]): [s for s in sources if s in eligible_sources]):
self.log.info('sync: no eligible sources, skipping') self.log.info('sync: no eligible sources, skipping')
@ -332,6 +370,15 @@ class Manager(object):
self.log.info('sync: sources=%s -> targets=%s', sources, targets) self.log.info('sync: sources=%s -> targets=%s', sources, targets)
try:
collected = []
for processor in processors:
collected.append(self.processors[processor])
processors = collected
except KeyError:
raise ManagerException('Zone {}, unknown processor: {}'
.format(zone_name, processor))
try: try:
# rather than using a list comprehension, we break this loop # rather than using a list comprehension, we break this loop
# out so that the `except` block below can reference the # out so that the `except` block below can reference the
@ -358,8 +405,9 @@ class Manager(object):
.format(zone_name, target)) .format(zone_name, target))
futures.append(self._executor.submit(self._populate_and_plan, futures.append(self._executor.submit(self._populate_and_plan,
zone_name, sources,
targets, lenient=lenient))
zone_name, processors,
sources, targets,
lenient=lenient))
# Wait on all results and unpack/flatten the plans and store the # Wait on all results and unpack/flatten the plans and store the
# desired states in case we need them below # desired states in case we need them below
@ -384,6 +432,7 @@ class Manager(object):
futures.append(self._executor.submit( futures.append(self._executor.submit(
self._populate_and_plan, self._populate_and_plan,
zone_name, zone_name,
processors,
[], [],
[self.providers[t] for t in source_config['targets']], [self.providers[t] for t in source_config['targets']],
desired=desired_config, desired=desired_config,
@ -402,7 +451,7 @@ class Manager(object):
plans.sort(key=self._plan_keyer, reverse=True) plans.sort(key=self._plan_keyer, reverse=True)
for output in self.plan_outputs.values(): for output in self.plan_outputs.values():
output.run(plans=plans, log=self.log)
output.run(plans=plans, log=self.log, fh=plan_output_fh)
if not force: if not force:
self.log.debug('sync: checking safety') self.log.debug('sync: checking safety')
@ -524,6 +573,16 @@ class Manager(object):
if isinstance(source, YamlProvider): if isinstance(source, YamlProvider):
source.populate(zone) source.populate(zone)
# check that processors are in order if any are specified
processors = config.get('processors', [])
try:
# same as above, but for processors this time
for processor in processors:
collected.append(self.processors[processor])
except KeyError:
raise ManagerException('Zone {}, unknown processor: {}'
.format(zone_name, processor))
def get_zone(self, zone_name): def get_zone(self, zone_name):
if not zone_name[-1] == '.': if not zone_name[-1] == '.':
raise ManagerException('Invalid zone name {}, missing ending dot' raise ManagerException('Invalid zone name {}, missing ending dot'


+ 6
- 0
octodns/processor/__init__.py View File

@ -0,0 +1,6 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals

+ 30
- 0
octodns/processor/base.py View File

@ -0,0 +1,30 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ..zone import Zone
class BaseProcessor(object):
    '''
    No-op base for processors. Each hook receives data, may return it
    unchanged (the default), or may return a modified copy.
    '''

    def __init__(self, name):
        # The configured name of this processor instance.
        self.name = name

    def _clone_zone(self, zone):
        '''Return a fresh, empty Zone with the same name and sub-zones.'''
        return Zone(zone.name, sub_zones=zone.sub_zones)

    def process_source_zone(self, zone, sources):
        '''
        Hook invoked after sources have populated `zone`.

        Note: `sources` may be empty, as will be the case for aliased
        zones.
        '''
        return zone

    def process_target_zone(self, zone, target):
        '''Hook invoked after the target provider has populated `zone`.'''
        return zone

    def process_plan(self, plan, sources, target):
        '''
        Hook invoked on the computed plan before it is applied.

        `plan` may be None if no changes were detected up until now; the
        processor may still create a plan. `sources` may be empty, as
        will be the case for aliased zones.
        '''
        return plan

+ 44
- 0
octodns/processor/filter.py View File

@ -0,0 +1,44 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from .base import BaseProcessor
class TypeAllowlistFilter(BaseProcessor):
    '''Processor that keeps only records whose type is in `allowlist`.'''

    def __init__(self, name, allowlist):
        super(TypeAllowlistFilter, self).__init__(name)
        # Set for O(1) membership tests.
        self.allowlist = set(allowlist)

    def _process(self, zone, *args, **kwargs):
        # Build a new zone containing only the allowed record types.
        filtered = self._clone_zone(zone)
        for record in zone.records:
            if record._type not in self.allowlist:
                continue
            filtered.add_record(record)
        return filtered

    # The same filtering applies on both the source and target sides.
    process_source_zone = _process
    process_target_zone = _process
class TypeRejectlistFilter(BaseProcessor):
    '''Processor that drops records whose type is in `rejectlist`.'''

    def __init__(self, name, rejectlist):
        super(TypeRejectlistFilter, self).__init__(name)
        # Set for O(1) membership tests.
        self.rejectlist = set(rejectlist)

    def _process(self, zone, *args, **kwargs):
        # Build a new zone, skipping any record with a rejected type.
        filtered = self._clone_zone(zone)
        for record in zone.records:
            if record._type in self.rejectlist:
                continue
            filtered.add_record(record)
        return filtered

    # The same filtering applies on both the source and target sides.
    process_source_zone = _process
    process_target_zone = _process

+ 103
- 0
octodns/processor/ownership.py View File

@ -0,0 +1,103 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from ..provider.plan import Plan
from ..record import Record
from .base import BaseProcessor
# Mark anything octoDNS is managing so that it can know it's safe to modify or
# delete. We'll take ownership of existing records that we're told to manage
# and thus "own" them going forward.
class OwnershipProcessor(BaseProcessor):

    def __init__(self, name, txt_name='_owner', txt_value='*octodns*'):
        '''
        :param name: configured processor name.
        :param txt_name: prefix used for the ownership TXT record names.
        :param txt_value: value stored in each ownership TXT record.
        '''
        super(OwnershipProcessor, self).__init__(name)
        self.txt_name = txt_name
        self.txt_value = txt_value
        # Pre-built single-value list used to compare against
        # record.values in _is_ownership.
        self._txt_values = [txt_value]

    def process_source_zone(self, zone, *args, **kwargs):
        '''
        Return a copy of `zone` with an ownership TXT record added
        alongside every source record. The TXT name encodes the owned
        record's type and name, e.g. `_owner.a.www` for an A record at
        `www`, or `_owner.a` for one at the zone root.
        '''
        ret = self._clone_zone(zone)
        for record in zone.records:
            # Always copy over the source records
            ret.add_record(record)
            # Then create and add an ownership TXT for each of them.
            # `*` is not valid in this position of a TXT name, so
            # wildcards are encoded as `_wildcard` (decoded in
            # process_plan).
            record_name = record.name.replace('*', '_wildcard')
            if record.name:
                name = '{}.{}.{}'.format(self.txt_name, record._type,
                                         record_name)
            else:
                # Zone-root record, no name component.
                name = '{}.{}'.format(self.txt_name, record._type)
            txt = Record.new(zone, name, {
                'type': 'TXT',
                'ttl': 60,
                'value': self.txt_value,
            })
            ret.add_record(txt)
        return ret

    def _is_ownership(self, record):
        # A record is an ownership marker iff it's a TXT, its name starts
        # with our configured prefix, and its value matches exactly.
        return record._type == 'TXT' and \
            record.name.startswith(self.txt_name) \
            and record.values == self._txt_values

    def process_plan(self, plan, *args, **kwargs):
        '''
        Filter `plan.changes` down to records octoDNS owns (or is taking
        ownership of), leaving unowned pre-existing records untouched.
        '''
        if not plan:
            # If we don't have any change there's nothing to do
            return plan

        # First find all the ownership info
        owned = defaultdict(dict)
        # We need to look for ownership in both the desired and existing
        # states, many things will show up in both, but that's fine.
        for record in list(plan.existing.records) + list(plan.desired.records):
            if self._is_ownership(record):
                # Decode `<txt_name>.<type>[.<name>]` back into the owned
                # record's type and name.
                pieces = record.name.split('.', 2)
                if len(pieces) > 2:
                    _, _type, name = pieces
                    name = name.replace('_wildcard', '*')
                else:
                    # Zone-root record, no name component.
                    _type = pieces[1]
                    name = ''
                owned[name][_type.upper()] = True

        # Cases:
        #   - Configured in source
        #     - We'll fully CRU/manage it adding ownership TXT,
        #       thanks to process_source_zone, if needed
        #   - Not in source
        #     - Has an ownership TXT - delete it & the ownership TXT
        #     - Does not have an ownership TXT - don't delete it
        #   - Special records like octodns-meta
        #     - Should be left alone and should not have ownership TXTs
        filtered_changes = []
        for change in plan.changes:
            record = change.record
            if not self._is_ownership(record) and \
               record._type not in owned[record.name] and \
               record.name != 'octodns-meta':
                # It's not an ownership TXT, it's not owned, and it's not
                # special; we're going to ignore it
                continue
            # We own this record or owned it up until now so whatever the
            # change is we should do
            filtered_changes.append(change)

        if plan.changes != filtered_changes:
            # Rebuild the plan with only the changes we're allowed to make.
            return Plan(plan.existing, plan.desired, filtered_changes,
                        plan.exists, plan.update_pcent_threshold,
                        plan.delete_pcent_threshold)

        return plan

+ 845
- 70
octodns/provider/azuredns.py
File diff suppressed because it is too large
View File


+ 4
- 1
octodns/provider/base.py View File

@ -44,7 +44,7 @@ class BaseProvider(BaseSource):
''' '''
return [] return []
def plan(self, desired):
def plan(self, desired, processors=[]):
self.log.info('plan: desired=%s', desired.name) self.log.info('plan: desired=%s', desired.name)
existing = Zone(desired.name, desired.sub_zones) existing = Zone(desired.name, desired.sub_zones)
@ -55,6 +55,9 @@ class BaseProvider(BaseSource):
self.log.warn('Provider %s used in target mode did not return ' self.log.warn('Provider %s used in target mode did not return '
'exists', self.id) 'exists', self.id)
for processor in processors:
existing = processor.process_target_zone(existing, target=self)
# compute the changes at the zone/record level # compute the changes at the zone/record level
changes = existing.changes(desired, self) changes = existing.changes(desired, self)


+ 3
- 3
octodns/provider/dnsimple.py View File

@ -51,8 +51,8 @@ class DnsimpleClient(object):
resp.raise_for_status() resp.raise_for_status()
return resp return resp
def domain(self, name):
path = '/domains/{}'.format(name)
def zone(self, name):
path = '/zones/{}'.format(name)
return self._request('GET', path).json() return self._request('GET', path).json()
def domain_create(self, name): def domain_create(self, name):
@ -442,7 +442,7 @@ class DnsimpleProvider(BaseProvider):
domain_name = desired.name[:-1] domain_name = desired.name[:-1]
try: try:
self._client.domain(domain_name)
self._client.zone(domain_name)
except DnsimpleClientNotFound: except DnsimpleClientNotFound:
self.log.debug('_apply: no matching zone, creating domain') self.log.debug('_apply: no matching zone, creating domain')
self._client.domain_create(domain_name) self._client.domain_create(domain_name)


+ 6
- 6
octodns/provider/dyn.py View File

@ -604,7 +604,7 @@ class DynProvider(BaseProvider):
return record return record
def _is_traffic_director_dyanmic(self, td, rulesets):
def _is_traffic_director_dynamic(self, td, rulesets):
for ruleset in rulesets: for ruleset in rulesets:
try: try:
pieces = ruleset.label.split(':') pieces = ruleset.label.split(':')
@ -632,7 +632,7 @@ class DynProvider(BaseProvider):
continue continue
# critical to call rulesets once, each call loads them :-( # critical to call rulesets once, each call loads them :-(
rulesets = td.rulesets rulesets = td.rulesets
if self._is_traffic_director_dyanmic(td, rulesets):
if self._is_traffic_director_dynamic(td, rulesets):
record = \ record = \
self._populate_dynamic_traffic_director(zone, fqdn, self._populate_dynamic_traffic_director(zone, fqdn,
_type, td, _type, td,
@ -705,7 +705,7 @@ class DynProvider(BaseProvider):
label) label)
extra.append(Update(record, record)) extra.append(Update(record, record))
continue continue
if _monitor_doesnt_match(monitor, record.healthcheck_host,
if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path, record.healthcheck_path,
record.healthcheck_protocol, record.healthcheck_protocol,
record.healthcheck_port): record.healthcheck_port):
@ -828,13 +828,13 @@ class DynProvider(BaseProvider):
self.traffic_director_monitors[label] = \ self.traffic_director_monitors[label] = \
self.traffic_director_monitors[fqdn] self.traffic_director_monitors[fqdn]
del self.traffic_director_monitors[fqdn] del self.traffic_director_monitors[fqdn]
if _monitor_doesnt_match(monitor, record.healthcheck_host,
if _monitor_doesnt_match(monitor, record.healthcheck_host(),
record.healthcheck_path, record.healthcheck_path,
record.healthcheck_protocol, record.healthcheck_protocol,
record.healthcheck_port): record.healthcheck_port):
self.log.info('_traffic_director_monitor: updating monitor ' self.log.info('_traffic_director_monitor: updating monitor '
'for %s', label) 'for %s', label)
monitor.update(record.healthcheck_host,
monitor.update(record.healthcheck_host(),
record.healthcheck_path, record.healthcheck_path,
record.healthcheck_protocol, record.healthcheck_protocol,
record.healthcheck_port) record.healthcheck_port)
@ -845,7 +845,7 @@ class DynProvider(BaseProvider):
monitor = DSFMonitor(label, protocol=record.healthcheck_protocol, monitor = DSFMonitor(label, protocol=record.healthcheck_protocol,
response_count=2, probe_interval=60, response_count=2, probe_interval=60,
retries=2, port=record.healthcheck_port, retries=2, port=record.healthcheck_port,
active='Y', host=record.healthcheck_host,
active='Y', host=record.healthcheck_host(),
timeout=self.MONITOR_TIMEOUT, timeout=self.MONITOR_TIMEOUT,
header=self.MONITOR_HEADER, header=self.MONITOR_HEADER,
path=record.healthcheck_path) path=record.healthcheck_path)


+ 1
- 1
octodns/provider/easydns.py View File

@ -59,7 +59,7 @@ class EasyDNSClient(object):
self.base_path = self.SANDBOX if sandbox else self.LIVE self.base_path = self.SANDBOX if sandbox else self.LIVE
sess = Session() sess = Session()
sess.headers.update({'Authorization': 'Basic {}' sess.headers.update({'Authorization': 'Basic {}'
.format(self.auth_key)})
.format(self.auth_key.decode('utf-8'))})
sess.headers.update({'accept': 'application/json'}) sess.headers.update({'accept': 'application/json'})
self._sess = sess self._sess = sess


+ 339
- 0
octodns/provider/hetzner.py View File

@ -0,0 +1,339 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from requests import Session
import logging
from ..record import Record
from .base import BaseProvider
class HetznerClientException(Exception):
    '''Base class for all errors raised by the Hetzner DNS API client.'''
class HetznerClientNotFound(HetznerClientException):
    '''Raised when the Hetzner DNS API answers with HTTP 404.'''

    def __init__(self):
        message = 'Not Found'
        super(HetznerClientNotFound, self).__init__(message)
class HetznerClientUnauthorized(HetznerClientException):
    '''Raised when the Hetzner DNS API answers with HTTP 401.'''

    def __init__(self):
        message = 'Unauthorized'
        super(HetznerClientUnauthorized, self).__init__(message)
class HetznerClient(object):
    '''Thin wrapper around the Hetzner DNS API, v1.

    Translates HTTP 401 into HetznerClientUnauthorized and HTTP 404 into
    HetznerClientNotFound; any other non-2xx response surfaces as a
    requests.HTTPError via raise_for_status.
    '''

    BASE_URL = 'https://dns.hetzner.com/api/v1'

    def __init__(self, token):
        session = Session()
        # Every endpoint authenticates with this header
        session.headers.update({'Auth-API-Token': token})
        self._session = session

    def _do(self, method, path, params=None, data=None):
        '''Perform a request against BASE_URL + path and return the raw
        response, mapping auth/missing errors to client exceptions.'''
        url = '{}{}'.format(self.BASE_URL, path)
        response = self._session.request(method, url, params=params, json=data)
        if response.status_code == 401:
            raise HetznerClientUnauthorized()
        if response.status_code == 404:
            raise HetznerClientNotFound()
        response.raise_for_status()
        return response

    def _do_json(self, method, path, params=None, data=None):
        '''Perform a request and return the decoded JSON body.'''
        return self._do(method, path, params, data).json()

    def zone_get(self, name):
        '''Return the zone object whose name matches `name` (no trailing
        dot). Raises HetznerClientNotFound if there is no such zone.'''
        params = {'name': name}
        zones = self._do_json('GET', '/zones', params)['zones']
        if not zones:
            # Guard against a 200 response with an empty result set so
            # callers only ever have to handle HetznerClientNotFound,
            # never IndexError.
            raise HetznerClientNotFound()
        return zones[0]

    def zone_create(self, name, ttl=None):
        '''Create a zone and return its object; ttl=None lets the API
        pick its default.'''
        data = {'name': name, 'ttl': ttl}
        return self._do_json('POST', '/zones', data=data)['zone']

    def zone_records_get(self, zone_id):
        '''Return all records of a zone, normalizing the API's `@`
        root-label to the empty name octoDNS uses.'''
        params = {'zone_id': zone_id}
        records = self._do_json('GET', '/records', params=params)['records']
        for record in records:
            if record['name'] == '@':
                record['name'] = ''
        return records

    def zone_record_create(self, zone_id, name, _type, value, ttl=None):
        '''Create a single record; an empty name is sent as the `@` root.'''
        data = {'name': name or '@', 'ttl': ttl, 'type': _type, 'value': value,
                'zone_id': zone_id}
        self._do('POST', '/records', data=data)

    def zone_record_delete(self, zone_id, record_id):
        '''Delete a record by id. zone_id is unused by the endpoint but
        kept for a consistent client interface.'''
        self._do('DELETE', '/records/{}'.format(record_id))
class HetznerProvider(BaseProvider):
    '''
    Hetzner DNS provider using API v1

    hetzner:
        class: octodns.provider.hetzner.HetznerProvider
        # Your Hetzner API token (required)
        token: foo
    '''
    SUPPORTS_GEO = False
    SUPPORTS_DYNAMIC = False
    SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'))

    def __init__(self, id, token, *args, **kwargs):
        self.log = logging.getLogger('HetznerProvider[{}]'.format(id))
        self.log.debug('__init__: id=%s, token=***', id)
        super(HetznerProvider, self).__init__(id, *args, **kwargs)
        self._client = HetznerClient(token)
        # Caches populated lazily; _zone_records is cleared per-zone
        # after _apply so the next populate sees fresh data.
        # zone name (with trailing dot) -> list of raw record dicts
        self._zone_records = {}
        # zone id -> zone metadata object from the API
        self._zone_metadata = {}
        # zone name (with trailing dot) -> zone id
        self._zone_name_to_id = {}

    def _append_dot(self, value):
        '''Return `value` as an absolute name; the root `@` and names
        that already end in a dot are returned unchanged.'''
        if value == '@' or value[-1] == '.':
            return value
        return '{}.'.format(value)

    def zone_metadata(self, zone_id=None, zone_name=None):
        '''Return the API's zone object, looked up by id or by octoDNS
        zone name (with trailing dot). Results are cached. A lookup by
        name may raise HetznerClientNotFound.'''
        if zone_name is not None:
            if zone_name in self._zone_name_to_id:
                zone_id = self._zone_name_to_id[zone_name]
            else:
                # The API expects the name without the trailing dot
                zone = self._client.zone_get(name=zone_name[:-1])
                zone_id = zone['id']
                self._zone_name_to_id[zone_name] = zone_id
                self._zone_metadata[zone_id] = zone
        return self._zone_metadata[zone_id]

    def _record_ttl(self, record):
        '''TTL of a raw record, falling back to its zone's default TTL
        when the record itself carries none.'''
        default_ttl = self.zone_metadata(zone_id=record['zone_id'])['ttl']
        return record['ttl'] if 'ttl' in record else default_ttl

    def _data_for_multiple(self, _type, records):
        # Escape semicolons so values survive octoDNS's TXT handling
        values = [record['value'].replace(';', '\\;') for record in records]
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }

    _data_for_A = _data_for_multiple
    _data_for_AAAA = _data_for_multiple

    def _data_for_CAA(self, _type, records):
        values = []
        for record in records:
            # Values look like '<flags> <tag> "<value>"'. Parse flags
            # and tag from the portion before the quoted value so that
            # multi-digit flags values (e.g. 128) round-trip correctly
            # instead of being truncated to their first digit.
            head = record['value'].split('"')[0]
            flags, tag = head.split()[:2]
            value = record['value'].split('"')[1]
            values.append({
                'flags': int(flags),
                'tag': tag,
                'value': value,
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }

    def _data_for_CNAME(self, _type, records):
        record = records[0]
        return {
            'ttl': self._record_ttl(record),
            'type': _type,
            'value': self._append_dot(record['value'])
        }

    def _data_for_MX(self, _type, records):
        values = []
        for record in records:
            # Values look like '<preference> <exchange>'
            value_stripped_split = record['value'].strip().split(' ')
            preference = value_stripped_split[0]
            exchange = value_stripped_split[-1]
            values.append({
                'preference': int(preference),
                'exchange': self._append_dot(exchange)
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }

    def _data_for_NS(self, _type, records):
        values = []
        for record in records:
            values.append(self._append_dot(record['value']))
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values,
        }

    def _data_for_SRV(self, _type, records):
        values = []
        for record in records:
            # Values look like '<priority> <weight> <port> <target>'
            value_stripped = record['value'].strip()
            priority = value_stripped.split(' ')[0]
            weight = value_stripped[len(priority):].strip().split(' ')[0]
            target = value_stripped.split(' ')[-1]
            port = value_stripped[:-len(target)].strip().split(' ')[-1]
            values.append({
                'port': int(port),
                'priority': int(priority),
                'target': self._append_dot(target),
                'weight': int(weight)
            })
        return {
            'ttl': self._record_ttl(records[0]),
            'type': _type,
            'values': values
        }

    _data_for_TXT = _data_for_multiple

    def zone_records(self, zone):
        '''Return (and cache) the raw record dicts for a zone; an
        unknown zone yields an empty list without being cached.'''
        if zone.name not in self._zone_records:
            try:
                zone_id = self.zone_metadata(zone_name=zone.name)['id']
                self._zone_records[zone.name] = \
                    self._client.zone_records_get(zone_id)
            except HetznerClientNotFound:
                return []
        return self._zone_records[zone.name]

    def populate(self, zone, target=False, lenient=False):
        self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
                       target, lenient)

        # Group raw records by (name, type) so multi-value octoDNS
        # records can be built from the per-value API records
        values = defaultdict(lambda: defaultdict(list))
        for record in self.zone_records(zone):
            _type = record['type']
            if _type not in self.SUPPORTS:
                self.log.warning('populate: skipping unsupported %s record',
                                 _type)
                continue
            values[record['name']][record['type']].append(record)

        before = len(zone.records)
        for name, types in values.items():
            for _type, records in types.items():
                data_for = getattr(self, '_data_for_{}'.format(_type))
                record = Record.new(zone, name, data_for(_type, records),
                                    source=self, lenient=lenient)
                zone.add_record(record, lenient=lenient)

        # zone_records only caches zones that actually exist
        exists = zone.name in self._zone_records
        self.log.info('populate: found %s records, exists=%s',
                      len(zone.records) - before, exists)
        return exists

    def _params_for_multiple(self, record):
        for value in record.values:
            yield {
                # Undo the escaping done when reading values back in
                'value': value.replace('\\;', ';'),
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }

    _params_for_A = _params_for_multiple
    _params_for_AAAA = _params_for_multiple

    def _params_for_CAA(self, record):
        for value in record.values:
            data = '{} {} "{}"'.format(value.flags, value.tag, value.value)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }

    def _params_for_single(self, record):
        yield {
            'value': record.value,
            'name': record.name,
            'ttl': record.ttl,
            'type': record._type
        }

    _params_for_CNAME = _params_for_single

    def _params_for_MX(self, record):
        for value in record.values:
            data = '{} {}'.format(value.preference, value.exchange)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }

    _params_for_NS = _params_for_multiple

    def _params_for_SRV(self, record):
        for value in record.values:
            data = '{} {} {} {}'.format(value.priority, value.weight,
                                        value.port, value.target)
            yield {
                'value': data,
                'name': record.name,
                'ttl': record.ttl,
                'type': record._type
            }

    _params_for_TXT = _params_for_multiple

    def _apply_Create(self, zone_id, change):
        '''Create one API record per value of the new record.'''
        new = change.new
        params_for = getattr(self, '_params_for_{}'.format(new._type))
        for params in params_for(new):
            self._client.zone_record_create(zone_id, params['name'],
                                            params['type'], params['value'],
                                            params['ttl'])

    def _apply_Update(self, zone_id, change):
        # It's way simpler to delete-then-recreate than to update
        self._apply_Delete(zone_id, change)
        self._apply_Create(zone_id, change)

    def _apply_Delete(self, zone_id, change):
        '''Delete every API record matching the existing record's name
        and type.'''
        existing = change.existing
        zone = existing.zone
        for record in self.zone_records(zone):
            if existing.name == record['name'] and \
               existing._type == record['type']:
                self._client.zone_record_delete(zone_id, record['id'])

    def _apply(self, plan):
        desired = plan.desired
        changes = plan.changes
        self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
                       len(changes))
        try:
            zone_id = self.zone_metadata(zone_name=desired.name)['id']
        except HetznerClientNotFound:
            self.log.debug('_apply: no matching zone, creating domain')
            zone_id = self._client.zone_create(desired.name[:-1])['id']

        for change in changes:
            class_name = change.__class__.__name__
            getattr(self, '_apply_{}'.format(class_name))(zone_id, change)

        # Clear out the cache if any
        self._zone_records.pop(desired.name, None)

+ 76
- 36
octodns/provider/ns1.py View File

@ -406,7 +406,7 @@ class Ns1Provider(BaseProvider):
for piece in note.split(' '): for piece in note.split(' '):
try: try:
k, v = piece.split(':', 1) k, v = piece.split(':', 1)
data[k] = v
data[k] = v if v != '' else None
except ValueError: except ValueError:
pass pass
return data return data
@ -464,10 +464,10 @@ class Ns1Provider(BaseProvider):
pass pass
return pool_name return pool_name
def _data_for_dynamic_A(self, _type, record):
def _data_for_dynamic(self, _type, record):
# First make sure we have the expected filters config # First make sure we have the expected filters config
if not self._valid_filter_config(record['filters'], record['domain']): if not self._valid_filter_config(record['filters'], record['domain']):
self.log.error('_data_for_dynamic_A: %s %s has unsupported '
self.log.error('_data_for_dynamic: %s %s has unsupported '
'filters', record['domain'], _type) 'filters', record['domain'], _type)
raise Ns1Exception('Unrecognized advanced record') raise Ns1Exception('Unrecognized advanced record')
@ -479,31 +479,45 @@ class Ns1Provider(BaseProvider):
# region. # region.
pools = defaultdict(lambda: {'fallback': None, 'values': []}) pools = defaultdict(lambda: {'fallback': None, 'values': []})
for answer in record['answers']: for answer in record['answers']:
# region (group name in the UI) is the pool name
pool_name = answer['region']
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
pool = pools[pool_name]
meta = answer['meta'] meta = answer['meta']
notes = self._parse_notes(meta.get('note', ''))
value = text_type(answer['answer'][0]) value = text_type(answer['answer'][0])
if meta['priority'] == 1:
# priority 1 means this answer is part of the pools own values
value_dict = {
'value': value,
'weight': int(meta.get('weight', 1)),
}
# If we have the original pool name and the catchall pool name
# in the answers, they point at the same pool. Add values only
# once
if value_dict not in pool['values']:
pool['values'].append(value_dict)
if notes.get('from', False) == '--default--':
# It's a final/default value, record it and move on
default.add(value)
continue
# NS1 pool names can be found in notes > v0.9.11, in order to allow
# us to find fallback-only pools/values. Before that we used
# `region` (group name in the UI) and only paid attention to
# priority=1 (first level)
notes_pool_name = notes.get('pool', None)
if notes_pool_name is None:
# < v0.9.11
if meta['priority'] != 1:
# Ignore all but priority 1
continue
# And use region's pool name as the pool name
pool_name = self._parse_dynamic_pool_name(answer['region'])
else: else:
# It's a fallback, we only care about it if it's a
# final/default
notes = self._parse_notes(meta.get('note', ''))
if notes.get('from', False) == '--default--':
default.add(value)
# > v0.9.11, use the notes-based name and consider all values
pool_name = notes_pool_name
pool = pools[pool_name]
value_dict = {
'value': value,
'weight': int(meta.get('weight', 1)),
}
if value_dict not in pool['values']:
# If we haven't seen this value before add it to the pool
pool['values'].append(value_dict)
# If there's a fallback recorded in the value for its pool go ahead
# and use it, another v0.9.11 thing
fallback = notes.get('fallback', None)
if fallback is not None:
pool['fallback'] = fallback
# The regions objects map to rules, but it's a bit fuzzy since they're # The regions objects map to rules, but it's a bit fuzzy since they're
# tied to pools on the NS1 side, e.g. we can only have 1 rule per pool, # tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
@ -528,7 +542,7 @@ class Ns1Provider(BaseProvider):
rules[rule_order] = rule rules[rule_order] = rule
# The group notes field in the UI is a `note` on the region here, # The group notes field in the UI is a `note` on the region here,
# that's where we can find our pool's fallback.
# that's where we can find our pool's fallback in < v0.9.11 anyway
if 'fallback' in notes: if 'fallback' in notes:
# set the fallback pool name # set the fallback pool name
pools[pool_name]['fallback'] = notes['fallback'] pools[pool_name]['fallback'] = notes['fallback']
@ -588,16 +602,22 @@ class Ns1Provider(BaseProvider):
rules = list(rules.values()) rules = list(rules.values())
rules.sort(key=lambda r: (r['_order'], r['pool'])) rules.sort(key=lambda r: (r['_order'], r['pool']))
return {
data = {
'dynamic': { 'dynamic': {
'pools': pools, 'pools': pools,
'rules': rules, 'rules': rules,
}, },
'ttl': record['ttl'], 'ttl': record['ttl'],
'type': _type, 'type': _type,
'values': sorted(default),
} }
if _type == 'CNAME':
data['value'] = default[0]
else:
data['values'] = default
return data
def _data_for_A(self, _type, record): def _data_for_A(self, _type, record):
if record.get('tier', 1) > 1: if record.get('tier', 1) > 1:
# Advanced record, see if it's first answer has a note # Advanced record, see if it's first answer has a note
@ -607,7 +627,7 @@ class Ns1Provider(BaseProvider):
first_answer_note = '' first_answer_note = ''
# If that note includes a `from` (pool name) it's a dynamic record # If that note includes a `from` (pool name) it's a dynamic record
if 'from:' in first_answer_note: if 'from:' in first_answer_note:
return self._data_for_dynamic_A(_type, record)
return self._data_for_dynamic(_type, record)
# If not it's an old geo record # If not it's an old geo record
return self._data_for_geo_A(_type, record) return self._data_for_geo_A(_type, record)
@ -646,6 +666,10 @@ class Ns1Provider(BaseProvider):
} }
def _data_for_CNAME(self, _type, record): def _data_for_CNAME(self, _type, record):
if record.get('tier', 1) > 1:
# Advanced dynamic record
return self._data_for_dynamic(_type, record)
try: try:
value = record['short_answers'][0] value = record['short_answers'][0]
except IndexError: except IndexError:
@ -822,6 +846,10 @@ class Ns1Provider(BaseProvider):
# This monitor does not belong to this record # This monitor does not belong to this record
config = monitor['config'] config = monitor['config']
value = config['host'] value = config['host']
if record._type == 'CNAME':
# Append a trailing dot for CNAME records so that
# lookup by a CNAME answer works
value = value + '.'
monitors[value] = monitor monitors[value] = monitor
return monitors return monitors
@ -872,6 +900,10 @@ class Ns1Provider(BaseProvider):
host = record.fqdn[:-1] host = record.fqdn[:-1]
_type = record._type _type = record._type
if _type == 'CNAME':
# NS1 does not accept a host value with a trailing dot
value = value[:-1]
ret = { ret = {
'active': True, 'active': True,
'config': { 'config': {
@ -897,7 +929,7 @@ class Ns1Provider(BaseProvider):
if record.healthcheck_protocol != 'TCP': if record.healthcheck_protocol != 'TCP':
# IF it's HTTP we need to send the request string # IF it's HTTP we need to send the request string
path = record.healthcheck_path path = record.healthcheck_path
host = record.healthcheck_host
host = record.healthcheck_host(value=value)
request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \ request = r'GET {path} HTTP/1.0\r\nHost: {host}\r\n' \
r'User-agent: NS1\r\n\r\n'.format(path=path, host=host) r'User-agent: NS1\r\n\r\n'.format(path=path, host=host)
ret['config']['send'] = request ret['config']['send'] = request
@ -978,12 +1010,15 @@ class Ns1Provider(BaseProvider):
seen.add(current_pool_name) seen.add(current_pool_name)
pool = pools[current_pool_name] pool = pools[current_pool_name]
for answer in pool_answers[current_pool_name]: for answer in pool_answers[current_pool_name]:
fallback = pool.data['fallback']
answer = { answer = {
'answer': answer['answer'], 'answer': answer['answer'],
'meta': { 'meta': {
'priority': priority, 'priority': priority,
'note': self._encode_notes({ 'note': self._encode_notes({
'from': pool_label, 'from': pool_label,
'pool': current_pool_name,
'fallback': fallback or '',
}), }),
'up': { 'up': {
'feed': answer['feed_id'], 'feed': answer['feed_id'],
@ -1013,7 +1048,7 @@ class Ns1Provider(BaseProvider):
} }
answers.append(answer) answers.append(answer)
def _params_for_dynamic_A(self, record):
def _params_for_dynamic(self, record):
pools = record.dynamic.pools pools = record.dynamic.pools
# Convert rules to regions # Convert rules to regions
@ -1114,10 +1149,14 @@ class Ns1Provider(BaseProvider):
'feed_id': feed_id, 'feed_id': feed_id,
}) })
if record._type == 'CNAME':
default_values = [record.value]
else:
default_values = record.values
default_answers = [{ default_answers = [{
'answer': [v], 'answer': [v],
'weight': 1, 'weight': 1,
} for v in record.values]
} for v in default_values]
# Build our list of answers # Build our list of answers
# The regions dictionary built above already has the required pool # The regions dictionary built above already has the required pool
@ -1146,7 +1185,7 @@ class Ns1Provider(BaseProvider):
def _params_for_A(self, record): def _params_for_A(self, record):
if getattr(record, 'dynamic', False): if getattr(record, 'dynamic', False):
return self._params_for_dynamic_A(record)
return self._params_for_dynamic(record)
elif hasattr(record, 'geo'): elif hasattr(record, 'geo'):
return self._params_for_geo_A(record) return self._params_for_geo_A(record)
@ -1171,8 +1210,10 @@ class Ns1Provider(BaseProvider):
values = [(v.flags, v.tag, v.value) for v in record.values] values = [(v.flags, v.tag, v.value) for v in record.values]
return {'answers': values, 'ttl': record.ttl}, None return {'answers': values, 'ttl': record.ttl}, None
# TODO: dynamic CNAME support
def _params_for_CNAME(self, record): def _params_for_CNAME(self, record):
if getattr(record, 'dynamic', False):
return self._params_for_dynamic(record)
return {'answers': [record.value], 'ttl': record.ttl}, None return {'answers': [record.value], 'ttl': record.ttl}, None
_params_for_ALIAS = _params_for_CNAME _params_for_ALIAS = _params_for_CNAME
@ -1250,8 +1291,7 @@ class Ns1Provider(BaseProvider):
extra.append(Update(record, record)) extra.append(Update(record, record))
continue continue
for have in self._monitors_for(record).values():
value = have['config']['host']
for value, have in self._monitors_for(record).items():
expected = self._monitor_gen(record, value) expected = self._monitor_gen(record, value)
# TODO: find values which have missing monitors # TODO: find values which have missing monitors
if not self._monitor_is_match(expected, have): if not self._monitor_is_match(expected, have):


+ 1
- 1
octodns/provider/plan.py View File

@ -50,7 +50,7 @@ class Plan(object):
except AttributeError: except AttributeError:
existing_n = 0 existing_n = 0
self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d'
self.log.debug('__init__: Creates=%d, Updates=%d, Deletes=%d '
'Existing=%d', 'Existing=%d',
self.change_counts['Create'], self.change_counts['Create'],
self.change_counts['Update'], self.change_counts['Update'],


+ 7
- 2
octodns/provider/route53.py View File

@ -1084,7 +1084,7 @@ class Route53Provider(BaseProvider):
try: try:
ip_address(text_type(value)) ip_address(text_type(value))
# We're working with an IP, host is the Host header # We're working with an IP, host is the Host header
healthcheck_host = record.healthcheck_host
healthcheck_host = record.healthcheck_host(value=value)
except (AddressValueError, ValueError): except (AddressValueError, ValueError):
# This isn't an IP, host is the value, value should be None # This isn't an IP, host is the value, value should be None
healthcheck_host = value healthcheck_host = value
@ -1253,7 +1253,12 @@ class Route53Provider(BaseProvider):
return self._gen_mods('DELETE', existing_records, existing_rrsets) return self._gen_mods('DELETE', existing_records, existing_rrsets)
def _extra_changes_update_needed(self, record, rrset): def _extra_changes_update_needed(self, record, rrset):
healthcheck_host = record.healthcheck_host
if record._type == 'CNAME':
# For CNAME, healthcheck host by default points to the CNAME value
healthcheck_host = rrset['ResourceRecords'][0]['Value']
else:
healthcheck_host = record.healthcheck_host()
healthcheck_path = record.healthcheck_path healthcheck_path = record.healthcheck_path
healthcheck_protocol = record.healthcheck_protocol healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port healthcheck_port = record.healthcheck_port


+ 2
- 2
octodns/provider/transip.py View File

@ -49,8 +49,8 @@ class TransipProvider(BaseProvider):
''' '''
SUPPORTS_GEO = False SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False SUPPORTS_DYNAMIC = False
SUPPORTS = set(
('A', 'AAAA', 'CNAME', 'MX', 'SRV', 'SPF', 'TXT', 'SSHFP', 'CAA'))
SUPPORTS = set(('A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'SPF', 'TXT',
'SSHFP', 'CAA'))
# unsupported by OctoDNS: 'TLSA' # unsupported by OctoDNS: 'TLSA'
MIN_TTL = 120 MIN_TTL = 120
TIMEOUT = 15 TIMEOUT = 15


+ 25
- 8
octodns/provider/ultra.py View File

@ -36,12 +36,12 @@ class UltraProvider(BaseProvider):
''' '''
Neustar UltraDNS provider Neustar UltraDNS provider
Documentation for Ultra REST API requires a login:
https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf
Implemented to the May 20, 2020 version of the document (dated on page ii)
Also described as Version 2.83.0 (title page)
Documentation for Ultra REST API:
https://ultra-portalstatic.ultradns.com/static/docs/REST-API_User_Guide.pdf
Implemented to the May 26, 2021 version of the document (dated on page ii)
Also described as Version 3.18.0 (title page)
Tested against 3.0.0-20200627220036.81047f5
Tested against 3.20.1-20210521075351.36b9297
As determined by querying https://api.ultradns.com/version As determined by querying https://api.ultradns.com/version
ultra: ultra:
@ -57,6 +57,7 @@ class UltraProvider(BaseProvider):
RECORDS_TO_TYPE = { RECORDS_TO_TYPE = {
'A (1)': 'A', 'A (1)': 'A',
'AAAA (28)': 'AAAA', 'AAAA (28)': 'AAAA',
'APEXALIAS (65282)': 'ALIAS',
'CAA (257)': 'CAA', 'CAA (257)': 'CAA',
'CNAME (5)': 'CNAME', 'CNAME (5)': 'CNAME',
'MX (15)': 'MX', 'MX (15)': 'MX',
@ -72,6 +73,7 @@ class UltraProvider(BaseProvider):
SUPPORTS_GEO = False SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False SUPPORTS_DYNAMIC = False
TIMEOUT = 5 TIMEOUT = 5
ZONE_REQUEST_LIMIT = 100
def _request(self, method, path, params=None, def _request(self, method, path, params=None,
data=None, json=None, json_response=True): data=None, json=None, json_response=True):
@ -151,7 +153,7 @@ class UltraProvider(BaseProvider):
def zones(self): def zones(self):
if self._zones is None: if self._zones is None:
offset = 0 offset = 0
limit = 100
limit = self.ZONE_REQUEST_LIMIT
zones = [] zones = []
paging = True paging = True
while paging: while paging:
@ -211,6 +213,7 @@ class UltraProvider(BaseProvider):
_data_for_PTR = _data_for_single _data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single _data_for_CNAME = _data_for_single
_data_for_ALIAS = _data_for_single
def _data_for_CAA(self, _type, records): def _data_for_CAA(self, _type, records):
return { return {
@ -374,6 +377,7 @@ class UltraProvider(BaseProvider):
} }
_contents_for_PTR = _contents_for_CNAME _contents_for_PTR = _contents_for_CNAME
_contents_for_ALIAS = _contents_for_CNAME
def _contents_for_SRV(self, record): def _contents_for_SRV(self, record):
return { return {
@ -401,8 +405,15 @@ class UltraProvider(BaseProvider):
def _gen_data(self, record): def _gen_data(self, record):
zone_name = self._remove_prefix(record.fqdn, record.name + '.') zone_name = self._remove_prefix(record.fqdn, record.name + '.')
# UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
if record._type == "ALIAS":
record_type = "APEXALIAS"
else:
record_type = record._type
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name, path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
record._type,
record_type,
record.fqdn) record.fqdn)
contents_for = getattr(self, '_contents_for_{}'.format(record._type)) contents_for = getattr(self, '_contents_for_{}'.format(record._type))
return path, contents_for(record) return path, contents_for(record)
@ -444,7 +455,13 @@ class UltraProvider(BaseProvider):
existing._type == self.RECORDS_TO_TYPE[record['rrtype']]: existing._type == self.RECORDS_TO_TYPE[record['rrtype']]:
zone_name = self._remove_prefix(existing.fqdn, zone_name = self._remove_prefix(existing.fqdn,
existing.name + '.') existing.name + '.')
# UltraDNS treats the `APEXALIAS` type as the octodns `ALIAS`.
existing_type = existing._type
if existing_type == "ALIAS":
existing_type = "APEXALIAS"
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name, path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
existing._type,
existing_type,
existing.fqdn) existing.fqdn)
self._delete(path, json_response=False) self._delete(path, json_response=False)

+ 24
- 11
octodns/record/__init__.py View File

@ -183,15 +183,11 @@ class Record(EqualityTupleMixin):
def included(self): def included(self):
return self._octodns.get('included', []) return self._octodns.get('included', [])
@property
def healthcheck_host(self):
def healthcheck_host(self, value=None):
healthcheck = self._octodns.get('healthcheck', {}) healthcheck = self._octodns.get('healthcheck', {})
if healthcheck.get('protocol', None) == 'TCP': if healthcheck.get('protocol', None) == 'TCP':
return None return None
try:
return healthcheck['host']
except KeyError:
return self.fqdn[:-1]
return healthcheck.get('host', self.fqdn[:-1]) or value
@property @property
def healthcheck_path(self): def healthcheck_path(self):
@ -417,6 +413,7 @@ class _ValueMixin(object):
class _DynamicPool(object): class _DynamicPool(object):
log = getLogger('_DynamicPool')
def __init__(self, _id, data): def __init__(self, _id, data):
self._id = _id self._id = _id
@ -429,6 +426,15 @@ class _DynamicPool(object):
] ]
values.sort(key=lambda d: d['value']) values.sort(key=lambda d: d['value'])
# normalize weight of a single-value pool
if len(values) == 1:
weight = data['values'][0].get('weight', 1)
if weight != 1:
self.log.warn(
'Using weight=1 instead of %s for single-value pool %s',
weight, _id)
values[0]['weight'] = 1
fallback = data.get('fallback', None) fallback = data.get('fallback', None)
self.data = { self.data = {
'fallback': fallback if fallback != 'default' else None, 'fallback': fallback if fallback != 'default' else None,
@ -531,6 +537,7 @@ class _DynamicMixin(object):
pools_exist = set() pools_exist = set()
pools_seen = set() pools_seen = set()
pools_seen_as_fallback = set()
if not isinstance(pools, dict): if not isinstance(pools, dict):
reasons.append('pools must be a dict') reasons.append('pools must be a dict')
elif not pools: elif not pools:
@ -572,10 +579,17 @@ class _DynamicMixin(object):
reasons.append('missing value in pool "{}" ' reasons.append('missing value in pool "{}" '
'value {}'.format(_id, value_num)) 'value {}'.format(_id, value_num))
if len(values) == 1 and values[0].get('weight', 1) != 1:
reasons.append('pool "{}" has single value with '
'weight!=1'.format(_id))
fallback = pool.get('fallback', None) fallback = pool.get('fallback', None)
if fallback is not None and fallback not in pools:
reasons.append('undefined fallback "{}" for pool "{}"'
.format(fallback, _id))
if fallback is not None:
if fallback in pools:
pools_seen_as_fallback.add(fallback)
else:
reasons.append('undefined fallback "{}" for pool "{}"'
.format(fallback, _id))
# Check for loops # Check for loops
fallback = pools[_id].get('fallback', None) fallback = pools[_id].get('fallback', None)
@ -624,7 +638,6 @@ class _DynamicMixin(object):
if pool not in pools: if pool not in pools:
reasons.append('rule {} undefined pool "{}"' reasons.append('rule {} undefined pool "{}"'
.format(rule_num, pool)) .format(rule_num, pool))
pools_seen.add(pool)
elif pool in pools_seen and geos: elif pool in pools_seen and geos:
reasons.append('rule {} invalid, target pool "{}" ' reasons.append('rule {} invalid, target pool "{}" '
'reused'.format(rule_num, pool)) 'reused'.format(rule_num, pool))
@ -644,7 +657,7 @@ class _DynamicMixin(object):
reasons.extend(GeoCodes.validate(geo, 'rule {} ' reasons.extend(GeoCodes.validate(geo, 'rule {} '
.format(rule_num))) .format(rule_num)))
unused = pools_exist - pools_seen
unused = pools_exist - pools_seen - pools_seen_as_fallback
if unused: if unused:
unused = '", "'.join(sorted(unused)) unused = '", "'.join(sorted(unused))
reasons.append('unused pools: "{}"'.format(unused)) reasons.append('unused pools: "{}"'.format(unused))


+ 5
- 3
requirements.txt View File

@ -1,6 +1,8 @@
PyYaml==5.3.1
azure-common==1.1.25
azure-mgmt-dns==3.0.0
PyYaml==5.4
azure-common==1.1.27
azure-identity==1.5.0
azure-mgmt-dns==8.0.0
azure-mgmt-trafficmanager==0.51.0
boto3==1.15.9 boto3==1.15.9
botocore==1.18.9 botocore==1.18.9
dnspython==1.16.0 dnspython==1.16.0


+ 4
- 0
script/coverage View File

@ -25,6 +25,10 @@ export DYN_CUSTOMER=
export DYN_PASSWORD= export DYN_PASSWORD=
export DYN_USERNAME= export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS= export GOOGLE_APPLICATION_CREDENTIALS=
export ARM_CLIENT_ID=
export ARM_CLIENT_SECRET=
export ARM_TENANT_ID=
export ARM_SUBSCRIPTION_ID=
# Don't allow disabling coverage # Don't allow disabling coverage
grep -r -I --line-number "# pragma: +no.*cover" octodns && { grep -r -I --line-number "# pragma: +no.*cover" octodns && {


+ 4
- 0
script/test View File

@ -25,5 +25,9 @@ export DYN_CUSTOMER=
export DYN_PASSWORD= export DYN_PASSWORD=
export DYN_USERNAME= export DYN_USERNAME=
export GOOGLE_APPLICATION_CREDENTIALS= export GOOGLE_APPLICATION_CREDENTIALS=
export ARM_CLIENT_ID=
export ARM_CLIENT_SECRET=
export ARM_TENANT_ID=
export ARM_SUBSCRIPTION_ID=
nosetests "$@" nosetests "$@"

+ 23
- 0
tests/config/dynamic.tests.yaml View File

@ -109,6 +109,29 @@ cname:
- pool: iad - pool: iad
type: CNAME type: CNAME
value: target.unit.tests. value: target.unit.tests.
pool-only-in-fallback:
dynamic:
pools:
one:
fallback: two
values:
- value: 1.1.1.1
three:
values:
- value: 3.3.3.3
two:
values:
- value: 2.2.2.2
rules:
- geos:
- NA-US
pool: one
- geos:
- AS-SG
pool: three
ttl: 300
type: A
values: [4.4.4.4]
real-ish-a: real-ish-a:
dynamic: dynamic:
pools: pools:


+ 6
- 0
tests/config/plan-output-filehandle.yaml View File

@ -0,0 +1,6 @@
manager:
plan_outputs:
"doesntexist":
class: octodns.provider.plan.DoesntExist
providers: {}
zones: {}

+ 23
- 0
tests/config/processors-missing-class.yaml View File

@ -0,0 +1,23 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
no-class: {}
zones:
unit.tests.:
processors:
- noop
sources:
- in
targets:
- dump

+ 25
- 0
tests/config/processors-wants-config.yaml View File

@ -0,0 +1,25 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
# valid class, but it wants a param and we're not passing it
wants-config:
class: helpers.WantsConfigProcessor
zones:
unit.tests.:
processors:
- noop
sources:
- in
targets:
- dump

+ 33
- 0
tests/config/processors.yaml View File

@ -0,0 +1,33 @@
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
processors:
# Just testing config so any processor will do
noop:
class: octodns.processor.base.BaseProcessor
zones:
unit.tests.:
processors:
- noop
sources:
- config
targets:
- dump
bad.unit.tests.:
processors:
- doesnt-exist
sources:
- in
targets:
- dump

+ 17
- 0
tests/config/unknown-processor.yaml View File

@ -0,0 +1,17 @@
manager:
max_workers: 2
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
unit.tests.:
sources:
- in
processors:
- missing
targets:
- dump

+ 223
- 0
tests/fixtures/hetzner-records.json View File

@ -0,0 +1,223 @@
{
"records": [
{
"id": "SOA",
"type": "SOA",
"name": "@",
"value": "hydrogen.ns.hetzner.com. dns.hetzner.com. 1 86400 10800 3600000 3600",
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "NS:sub:0",
"type": "NS",
"name": "sub",
"value": "6.2.3.4",
"ttl": 3600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "NS:sub:1",
"type": "NS",
"name": "sub",
"value": "7.2.3.4",
"ttl": 3600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "SRV:_srv._tcp:0",
"type": "SRV",
"name": "_srv._tcp",
"value": "10 20 30 foo-1.unit.tests",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "SRV:_srv._tcp:1",
"type": "SRV",
"name": "_srv._tcp",
"value": "12 20 30 foo-2.unit.tests",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "TXT:txt:0",
"type": "TXT",
"name": "txt",
"value": "\"Bah bah black sheep\"",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "TXT:txt:1",
"type": "TXT",
"name": "txt",
"value": "\"have you any wool.\"",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "A:@:0",
"type": "A",
"name": "@",
"value": "1.2.3.4",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "A:@:1",
"type": "A",
"name": "@",
"value": "1.2.3.5",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "A:www:0",
"type": "A",
"name": "www",
"value": "2.2.3.6",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "MX:mx:0",
"type": "MX",
"name": "mx",
"value": "10 smtp-4.unit.tests",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "MX:mx:1",
"type": "MX",
"name": "mx",
"value": "20 smtp-2.unit.tests",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "MX:mx:2",
"type": "MX",
"name": "mx",
"value": "30 smtp-3.unit.tests",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "MX:mx:3",
"type": "MX",
"name": "mx",
"value": "40 smtp-1.unit.tests",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "AAAA:aaaa:0",
"type": "AAAA",
"name": "aaaa",
"value": "2601:644:500:e210:62f8:1dff:feb8:947a",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "CNAME:cname:0",
"type": "CNAME",
"name": "cname",
"value": "unit.tests",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "A:www.sub:0",
"type": "A",
"name": "www.sub",
"value": "2.2.3.6",
"ttl": 300,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "TXT:txt:2",
"type": "TXT",
"name": "txt",
"value": "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "CAA:@:0",
"type": "CAA",
"name": "@",
"value": "0 issue \"ca.unit.tests\"",
"ttl": 3600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "CNAME:included:0",
"type": "CNAME",
"name": "included",
"value": "unit.tests",
"ttl": 3600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "SRV:_imap._tcp:0",
"type": "SRV",
"name": "_imap._tcp",
"value": "0 0 0 .",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
},
{
"id": "SRV:_pop3._tcp:0",
"type": "SRV",
"name": "_pop3._tcp",
"value": "0 0 0 .",
"ttl": 600,
"zone_id": "unit.tests",
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"modified": "0000-00-00 00:00:00.000 +0000 UTC"
}
]
}

+ 43
- 0
tests/fixtures/hetzner-zones.json View File

@ -0,0 +1,43 @@
{
"zones": [
{
"id": "unit.tests",
"name": "unit.tests",
"ttl": 3600,
"registrar": "",
"legacy_dns_host": "",
"legacy_ns": [],
"ns": [],
"created": "0000-00-00 00:00:00.000 +0000 UTC",
"verified": "",
"modified": "0000-00-00 00:00:00.000 +0000 UTC",
"project": "",
"owner": "",
"permission": "",
"zone_type": {
"id": "",
"name": "",
"description": "",
"prices": null
},
"status": "verified",
"paused": false,
"is_secondary_dns": false,
"txt_verification": {
"name": "",
"token": ""
},
"records_count": null
}
],
"meta": {
"pagination": {
"page": 1,
"per_page": 100,
"previous_page": 1,
"next_page": 1,
"last_page": 1,
"total_entries": 1
}
}
}

+ 9
- 0
tests/fixtures/ultra-records-page-2.json View File

@ -32,7 +32,16 @@
"rdata": [ "rdata": [
"www.octodns1.test." "www.octodns1.test."
] ]
},
{
"ownerName": "host1.octodns1.test.",
"rrtype": "RRSET (70)",
"ttl": 3600,
"rdata": [
"E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855"
]
} }
], ],
"resultInfo": { "resultInfo": {
"totalCount": 13, "totalCount": 13,


+ 30
- 0
tests/helpers.py View File

@ -7,6 +7,10 @@ from __future__ import absolute_import, division, print_function, \
from shutil import rmtree from shutil import rmtree
from tempfile import mkdtemp from tempfile import mkdtemp
from logging import getLogger
from octodns.processor.base import BaseProcessor
from octodns.provider.base import BaseProvider
class SimpleSource(object): class SimpleSource(object):
@ -90,3 +94,29 @@ class TemporaryDirectory(object):
rmtree(self.dirname) rmtree(self.dirname)
else: else:
raise Exception(self.dirname) raise Exception(self.dirname)
class WantsConfigProcessor(BaseProcessor):
def __init__(self, name, some_config):
super(WantsConfigProcessor, self).__init__(name)
class PlannableProvider(BaseProvider):
log = getLogger('PlannableProvider')
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('A',))
def __init__(self, *args, **kwargs):
super(PlannableProvider, self).__init__(*args, **kwargs)
def populate(self, zone, source=False, target=False, lenient=False):
pass
def supports(self, record):
return True
def __repr__(self):
return self.__class__.__name__

+ 149
- 9
tests/test_octodns_manager.py View File

@ -8,16 +8,19 @@ from __future__ import absolute_import, division, print_function, \
from os import environ from os import environ
from os.path import dirname, join from os.path import dirname, join
from six import text_type from six import text_type
from unittest import TestCase
from octodns.record import Record
from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager, \ from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager, \
ManagerException ManagerException
from octodns.processor.base import BaseProcessor
from octodns.record import Create, Delete, Record
from octodns.yaml import safe_load from octodns.yaml import safe_load
from octodns.zone import Zone from octodns.zone import Zone
from mock import MagicMock, patch
from unittest import TestCase
from helpers import DynamicProvider, GeoProvider, NoSshFpProvider, \ from helpers import DynamicProvider, GeoProvider, NoSshFpProvider, \
SimpleProvider, TemporaryDirectory
PlannableProvider, SimpleProvider, TemporaryDirectory
config_dir = join(dirname(__file__), 'config') config_dir = join(dirname(__file__), 'config')
@ -336,6 +339,11 @@ class TestManager(TestCase):
Manager(get_config_filename('simple-alias-zone.yaml')) \ Manager(get_config_filename('simple-alias-zone.yaml')) \
.validate_configs() .validate_configs()
with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('unknown-processor.yaml')) \
.validate_configs()
self.assertTrue('unknown processor' in text_type(ctx.exception))
def test_get_zone(self): def test_get_zone(self):
Manager(get_config_filename('simple.yaml')).get_zone('unit.tests.') Manager(get_config_filename('simple.yaml')).get_zone('unit.tests.')
@ -356,20 +364,152 @@ class TestManager(TestCase):
class NoLenient(SimpleProvider): class NoLenient(SimpleProvider):
def populate(self, zone, source=False):
def populate(self, zone):
pass pass
# This should be ok, we'll fall back to not passing it # This should be ok, we'll fall back to not passing it
manager._populate_and_plan('unit.tests.', [NoLenient()], [])
manager._populate_and_plan('unit.tests.', [], [NoLenient()], [])
class OtherType(SimpleProvider):
class NoZone(SimpleProvider):
def populate(self, zone, lenient=False):
raise TypeError('something else')
# This will blow up, we don't fallback for source
with self.assertRaises(TypeError) as ctx:
manager._populate_and_plan('unit.tests.', [], [OtherType()],
[])
self.assertEquals('something else', text_type(ctx.exception))
def populate(self, lenient=False):
def test_plan_processors_fallback(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
# Only allow a target that doesn't exist
manager = Manager(get_config_filename('simple.yaml'))
class NoProcessors(SimpleProvider):
def plan(self, zone):
pass pass
# This should be ok, we'll fall back to not passing it
manager._populate_and_plan('unit.tests.', [], [],
[NoProcessors()])
class OtherType(SimpleProvider):
def plan(self, zone, processors):
raise TypeError('something else')
# This will blow up, we don't fallback for source # This will blow up, we don't fallback for source
with self.assertRaises(TypeError):
manager._populate_and_plan('unit.tests.', [NoZone()], [])
with self.assertRaises(TypeError) as ctx:
manager._populate_and_plan('unit.tests.', [], [],
[OtherType()])
self.assertEquals('something else', text_type(ctx.exception))
@patch('octodns.manager.Manager._get_named_class')
def test_sync_passes_file_handle(self, mock):
plan_output_mock = MagicMock()
plan_output_class_mock = MagicMock()
plan_output_class_mock.return_value = plan_output_mock
mock.return_value = plan_output_class_mock
fh_mock = MagicMock()
Manager(get_config_filename('plan-output-filehandle.yaml')
).sync(plan_output_fh=fh_mock)
# Since we only care about the fh kwarg, and different _PlanOutputs are
# are free to require arbitrary kwargs anyway, we concern ourselves
# with checking the value of fh only.
plan_output_mock.run.assert_called()
_, kwargs = plan_output_mock.run.call_args
self.assertEqual(fh_mock, kwargs.get('fh'))
def test_processor_config(self):
# Smoke test loading a valid config
manager = Manager(get_config_filename('processors.yaml'))
self.assertEquals(['noop'], list(manager.processors.keys()))
# This zone specifies a valid processor
manager.sync(['unit.tests.'])
with self.assertRaises(ManagerException) as ctx:
# This zone specifies a non-existant processor
manager.sync(['bad.unit.tests.'])
self.assertTrue('Zone bad.unit.tests., unknown processor: '
'doesnt-exist' in text_type(ctx.exception))
with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('processors-missing-class.yaml'))
self.assertTrue('Processor no-class is missing class' in
text_type(ctx.exception))
with self.assertRaises(ManagerException) as ctx:
Manager(get_config_filename('processors-wants-config.yaml'))
self.assertTrue('Incorrect processor config for wants-config' in
text_type(ctx.exception))
def test_processors(self):
manager = Manager(get_config_filename('simple.yaml'))
targets = [PlannableProvider('prov')]
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
# muck with sources
class MockProcessor(BaseProcessor):
def process_source_zone(self, zone, sources):
zone = self._clone_zone(zone)
zone.add_record(record)
return zone
mock = MockProcessor('mock')
plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
targets)
# Our mock was called and added the record
self.assertEquals(record, list(zone.records)[0])
# We got a create for the thing added to the expected state (source)
self.assertIsInstance(plans[0][1].changes[0], Create)
# muck with targets
class MockProcessor(BaseProcessor):
def process_target_zone(self, zone, target):
zone = self._clone_zone(zone)
zone.add_record(record)
return zone
mock = MockProcessor('mock')
plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
targets)
# No record added since it's target this time
self.assertFalse(zone.records)
# We got a delete for the thing added to the existing state (target)
self.assertIsInstance(plans[0][1].changes[0], Delete)
# muck with plans
class MockProcessor(BaseProcessor):
def process_target_zone(self, zone, target):
zone = self._clone_zone(zone)
zone.add_record(record)
return zone
def process_plan(self, plans, sources, target):
# get rid of the change
plans.changes.pop(0)
mock = MockProcessor('mock')
plans, zone = manager._populate_and_plan('unit.tests.', [mock], [],
targets)
# We planned a delete again, but this time removed it from the plan, so
# no plans
self.assertFalse(plans)
class TestMainThreadExecutor(TestCase): class TestMainThreadExecutor(TestCase):


+ 90
- 0
tests/test_octodns_processor_filter.py View File

@ -0,0 +1,90 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.processor.filter import TypeAllowlistFilter, TypeRejectlistFilter
from octodns.record import Record
from octodns.zone import Zone
zone = Zone('unit.tests.', [])
for record in [
Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
}),
Record.new(zone, 'aaaa', {
'ttl': 30,
'type': 'AAAA',
'value': '::1',
}),
Record.new(zone, 'txt', {
'ttl': 30,
'type': 'TXT',
'value': 'Hello World!',
}),
Record.new(zone, 'a2', {
'ttl': 30,
'type': 'A',
'value': '2.3.4.5',
}),
Record.new(zone, 'txt2', {
'ttl': 30,
'type': 'TXT',
'value': 'That will do',
}),
]:
zone.add_record(record)
class TestTypeAllowListFilter(TestCase):
def test_basics(self):
filter_a = TypeAllowlistFilter('only-a', set(('A')))
got = filter_a.process_source_zone(zone)
self.assertEquals(['a', 'a2'], sorted([r.name for r in got.records]))
filter_aaaa = TypeAllowlistFilter('only-aaaa', ('AAAA',))
got = filter_aaaa.process_source_zone(zone)
self.assertEquals(['aaaa'], sorted([r.name for r in got.records]))
filter_txt = TypeAllowlistFilter('only-txt', ['TXT'])
got = filter_txt.process_target_zone(zone)
self.assertEquals(['txt', 'txt2'],
sorted([r.name for r in got.records]))
filter_a_aaaa = TypeAllowlistFilter('only-aaaa', set(('A', 'AAAA')))
got = filter_a_aaaa.process_target_zone(zone)
self.assertEquals(['a', 'a2', 'aaaa'],
sorted([r.name for r in got.records]))
class TestTypeRejectListFilter(TestCase):
def test_basics(self):
filter_a = TypeRejectlistFilter('not-a', set(('A')))
got = filter_a.process_source_zone(zone)
self.assertEquals(['aaaa', 'txt', 'txt2'],
sorted([r.name for r in got.records]))
filter_aaaa = TypeRejectlistFilter('not-aaaa', ('AAAA',))
got = filter_aaaa.process_source_zone(zone)
self.assertEquals(['a', 'a2', 'txt', 'txt2'],
sorted([r.name for r in got.records]))
filter_txt = TypeRejectlistFilter('not-txt', ['TXT'])
got = filter_txt.process_target_zone(zone)
self.assertEquals(['a', 'a2', 'aaaa'],
sorted([r.name for r in got.records]))
filter_a_aaaa = TypeRejectlistFilter('not-a-aaaa', set(('A', 'AAAA')))
got = filter_a_aaaa.process_target_zone(zone)
self.assertEquals(['txt', 'txt2'],
sorted([r.name for r in got.records]))

+ 146
- 0
tests/test_octodns_processor_ownership.py View File

@ -0,0 +1,146 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.processor.ownership import OwnershipProcessor
from octodns.record import Delete, Record
from octodns.zone import Zone
from helpers import PlannableProvider
zone = Zone('unit.tests.', [])
records = {}
for record in [
Record.new(zone, '', {
'ttl': 30,
'type': 'A',
'values': [
'1.2.3.4',
'5.6.7.8',
],
}),
Record.new(zone, 'the-a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
}),
Record.new(zone, 'the-aaaa', {
'ttl': 30,
'type': 'AAAA',
'value': '::1',
}),
Record.new(zone, 'the-txt', {
'ttl': 30,
'type': 'TXT',
'value': 'Hello World!',
}),
Record.new(zone, '*', {
'ttl': 30,
'type': 'A',
'value': '4.3.2.1',
}),
]:
records[record.name] = record
zone.add_record(record)
class TestOwnershipProcessor(TestCase):
def test_process_source_zone(self):
ownership = OwnershipProcessor('ownership')
got = ownership.process_source_zone(zone)
self.assertEquals([
'',
'*',
'_owner.a',
'_owner.a._wildcard',
'_owner.a.the-a',
'_owner.aaaa.the-aaaa',
'_owner.txt.the-txt',
'the-a',
'the-aaaa',
'the-txt',
], sorted([r.name for r in got.records]))
found = False
for record in got.records:
if record.name.startswith(ownership.txt_name):
self.assertEquals([ownership.txt_value], record.values)
# test _is_ownership while we're in here
self.assertTrue(ownership._is_ownership(record))
found = True
else:
self.assertFalse(ownership._is_ownership(record))
self.assertTrue(found)
def test_process_plan(self):
ownership = OwnershipProcessor('ownership')
provider = PlannableProvider('helper')
# No plan, is a quick noop
self.assertFalse(ownership.process_plan(None))
# Nothing exists create both records and ownership
ownership_added = ownership.process_source_zone(zone)
plan = provider.plan(ownership_added)
self.assertTrue(plan)
# Double the number of records
self.assertEquals(len(records) * 2, len(plan.changes))
# Now process the plan, shouldn't make any changes, we're creating
# everything
got = ownership.process_plan(plan)
self.assertTrue(got)
self.assertEquals(len(records) * 2, len(got.changes))
# Something extra exists and doesn't have ownership TXT, leave it
# alone, we don't own it.
extra_a = Record.new(zone, 'extra-a', {
'ttl': 30,
'type': 'A',
'value': '4.4.4.4',
})
plan.existing.add_record(extra_a)
# If we'd done a "real" plan we'd have a delete for the extra thing.
plan.changes.append(Delete(extra_a))
# Process the plan, shouldn't make any changes since the extra bit is
# something we don't own
got = ownership.process_plan(plan)
self.assertTrue(got)
self.assertEquals(len(records) * 2, len(got.changes))
# Something extra exists and does have an ownership record so we will
# delete it...
copy = Zone('unit.tests.', [])
for record in records.values():
if record.name != 'the-a':
copy.add_record(record)
# New ownership, without the `the-a`
ownership_added = ownership.process_source_zone(copy)
self.assertEquals(len(records) * 2 - 2, len(ownership_added.records))
plan = provider.plan(ownership_added)
# Fake the extra existing by adding the record, its ownership, and the
# two delete changes.
the_a = records['the-a']
plan.existing.add_record(the_a)
name = '{}.a.the-a'.format(ownership.txt_name)
the_a_ownership = Record.new(zone, name, {
'ttl': 30,
'type': 'TXT',
'value': ownership.txt_value,
})
plan.existing.add_record(the_a_ownership)
plan.changes.append(Delete(the_a))
plan.changes.append(Delete(the_a_ownership))
# Finally process the plan, should be a noop and we should get the same
# plan out, meaning the planned deletes were allowed to happen.
got = ownership.process_plan(plan)
self.assertTrue(got)
self.assertEquals(plan, got)
self.assertEquals(len(plan.changes), len(got.changes))

+ 1718
- 29
tests/test_octodns_provider_azuredns.py
File diff suppressed because it is too large
View File


+ 65
- 2
tests/test_octodns_provider_base.py View File

@ -9,9 +9,10 @@ from logging import getLogger
from six import text_type from six import text_type
from unittest import TestCase from unittest import TestCase
from octodns.record import Create, Delete, Record, Update
from octodns.processor.base import BaseProcessor
from octodns.provider.base import BaseProvider from octodns.provider.base import BaseProvider
from octodns.provider.plan import Plan, UnsafePlan from octodns.provider.plan import Plan, UnsafePlan
from octodns.record import Create, Delete, Record, Update
from octodns.zone import Zone from octodns.zone import Zone
@ -21,7 +22,7 @@ class HelperProvider(BaseProvider):
SUPPORTS = set(('A',)) SUPPORTS = set(('A',))
id = 'test' id = 'test'
def __init__(self, extra_changes, apply_disabled=False,
def __init__(self, extra_changes=[], apply_disabled=False,
include_change_callback=None): include_change_callback=None):
self.__extra_changes = extra_changes self.__extra_changes = extra_changes
self.apply_disabled = apply_disabled self.apply_disabled = apply_disabled
@ -43,6 +44,29 @@ class HelperProvider(BaseProvider):
pass pass
class TrickyProcessor(BaseProcessor):
def __init__(self, name, add_during_process_target_zone):
super(TrickyProcessor, self).__init__(name)
self.add_during_process_target_zone = add_during_process_target_zone
self.reset()
def reset(self):
self.existing = None
self.target = None
def process_target_zone(self, existing, target):
self.existing = existing
self.target = target
new = self._clone_zone(existing)
for record in existing.records:
new.add_record(record)
for record in self.add_during_process_target_zone:
new.add_record(record)
return new
class TestBaseProvider(TestCase): class TestBaseProvider(TestCase):
def test_base_provider(self): def test_base_provider(self):
@ -138,6 +162,45 @@ class TestBaseProvider(TestCase):
self.assertTrue(plan) self.assertTrue(plan)
self.assertEquals(1, len(plan.changes)) self.assertEquals(1, len(plan.changes))
def test_plan_with_processors(self):
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
provider = HelperProvider()
# Processor that adds a record to the zone, which planning will then
# delete since it won't know anything about it
tricky = TrickyProcessor('tricky', [record])
plan = provider.plan(zone, processors=[tricky])
self.assertTrue(plan)
self.assertEquals(1, len(plan.changes))
self.assertIsInstance(plan.changes[0], Delete)
# Called processor stored its params
self.assertTrue(tricky.existing)
self.assertEquals(zone.name, tricky.existing.name)
# Chain of processors happen one after the other
other = Record.new(zone, 'b', {
'ttl': 30,
'type': 'A',
'value': '5.6.7.8',
})
# Another processor will add its record, thus 2 deletes
another = TrickyProcessor('tricky', [other])
plan = provider.plan(zone, processors=[tricky, another])
self.assertTrue(plan)
self.assertEquals(2, len(plan.changes))
self.assertIsInstance(plan.changes[0], Delete)
self.assertIsInstance(plan.changes[1], Delete)
# 2nd processor stored its params, and we'll see the record the
# first one added
self.assertTrue(another.existing)
self.assertEquals(zone.name, another.existing.name)
self.assertEquals(1, len(another.existing.records))
def test_apply(self): def test_apply(self):
ignored = Zone('unit.tests.', []) ignored = Zone('unit.tests.', [])


+ 341
- 0
tests/test_octodns_provider_hetzner.py View File

@ -0,0 +1,341 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from six import text_type
from unittest import TestCase
from octodns.record import Record
from octodns.provider.hetzner import HetznerClientNotFound, \
HetznerProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestHetznerProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
def test_populate(self):
provider = HetznerProvider('test', 'token')
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401,
text='{"message":"Invalid authentication credentials"}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unauthorized', text_type(ctx.exception))
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
# Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"zone":{"id":"","name":"","ttl":0,"registrar":"",'
'"legacy_dns_host":"","legacy_ns":null,"ns":null,'
'"created":"","verified":"","modified":"","project":"",'
'"owner":"","permission":"","zone_type":{"id":"",'
'"name":"","description":"","prices":null},"status":"",'
'"paused":false,"is_secondary_dns":false,'
'"txt_verification":{"name":"","token":""},'
'"records_count":0},"error":{'
'"message":"zone not found","code":404}}')
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# No diffs == no changes
with requests_mock() as mock:
base = provider._client.BASE_URL
with open('tests/fixtures/hetzner-zones.json') as fh:
mock.get('{}/zones'.format(base), text=fh.read())
with open('tests/fixtures/hetzner-records.json') as fh:
mock.get('{}/records'.format(base), text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(13, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(13, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
def test_apply(self):
    """Exercise plan()/apply() against a fully mocked Hetzner client.

    Two passes: first a brand-new zone is created and every supported
    record from ``self.expected`` is POSTed; then, against a pre-seeded
    set of existing records, one record is updated (recreate) and one
    is deleted.  The low-level ``_do`` helper is mocked, so the exact
    call sequence and call count are what is being verified.
    """
    provider = HetznerProvider('test', 'token')

    # Stub out the HTTP transport entirely; every client call funnels
    # through _do(), and resp.json feeds back queued payloads below.
    resp = Mock()
    resp.json = Mock()
    provider._client._do = Mock(return_value=resp)

    # Payload the API returns once the zone has been created.
    domain_after_creation = {'zone': {
        'id': 'unit.tests',
        'name': 'unit.tests',
        'ttl': 3600,
    }}

    # non-existent domain, create everything
    # NOTE: side_effect is an ordered queue -- the first two json()
    # calls raise (zone lookup misses), the third returns the new zone.
    resp.json.side_effect = [
        HetznerClientNotFound,  # no zone in populate
        HetznerClientNotFound,  # no zone during apply
        domain_after_creation,
    ]

    plan = provider.plan(self.expected)
    # No root NS, no ignored, no excluded, no unsupported
    n = len(self.expected.records) - 9
    self.assertEquals(n, len(plan.changes))
    self.assertEquals(n, provider.apply(plan))
    self.assertFalse(plan.exists)

    provider._client._do.assert_has_calls([
        # created the zone
        call('POST', '/zones', None, {
            'name': 'unit.tests',
            'ttl': None,
        }),
        # created all the records with their expected data
        call('POST', '/records', data={
            'name': '@',
            'ttl': 300,
            'type': 'A',
            'value': '1.2.3.4',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '@',
            'ttl': 300,
            'type': 'A',
            'value': '1.2.3.5',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '@',
            'ttl': 3600,
            'type': 'CAA',
            'value': '0 issue "ca.unit.tests"',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '_imap._tcp',
            'ttl': 600,
            'type': 'SRV',
            'value': '0 0 0 .',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '_pop3._tcp',
            'ttl': 600,
            'type': 'SRV',
            'value': '0 0 0 .',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '_srv._tcp',
            'ttl': 600,
            'type': 'SRV',
            'value': '10 20 30 foo-1.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': '_srv._tcp',
            'ttl': 600,
            'type': 'SRV',
            'value': '12 20 30 foo-2.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'aaaa',
            'ttl': 600,
            'type': 'AAAA',
            'value': '2601:644:500:e210:62f8:1dff:feb8:947a',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'cname',
            'ttl': 300,
            'type': 'CNAME',
            'value': 'unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'included',
            'ttl': 3600,
            'type': 'CNAME',
            'value': 'unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'mx',
            'ttl': 300,
            'type': 'MX',
            'value': '10 smtp-4.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'mx',
            'ttl': 300,
            'type': 'MX',
            'value': '20 smtp-2.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'mx',
            'ttl': 300,
            'type': 'MX',
            'value': '30 smtp-3.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'mx',
            'ttl': 300,
            'type': 'MX',
            'value': '40 smtp-1.unit.tests.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'sub',
            'ttl': 3600,
            'type': 'NS',
            'value': '6.2.3.4.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'sub',
            'ttl': 3600,
            'type': 'NS',
            'value': '7.2.3.4.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'txt',
            'ttl': 600,
            'type': 'TXT',
            'value': 'Bah bah black sheep',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'txt',
            'ttl': 600,
            'type': 'TXT',
            'value': 'have you any wool.',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'txt',
            'ttl': 600,
            'type': 'TXT',
            'value': 'v=DKIM1;k=rsa;s=email;h=sha256;'
                     'p=A/kinda+of/long/string+with+numb3rs',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'www',
            'ttl': 300,
            'type': 'A',
            'value': '2.2.3.6',
            'zone_id': 'unit.tests',
        }),
        call('POST', '/records', data={
            'name': 'www.sub',
            'ttl': 300,
            'type': 'A',
            'value': '2.2.3.6',
            'zone_id': 'unit.tests',
        }),
    ])
    # 1 zone create + the record creates above -- exact count guards
    # against any unexpected extra API traffic.
    self.assertEquals(24, provider._client._do.call_count)

    provider._client._do.reset_mock()

    # delete 1 and update 1
    # Seed the "existing" state directly on the client: a zone that
    # already exists plus three records (www x2, ttl x1).
    provider._client.zone_get = Mock(return_value={
        'id': 'unit.tests',
        'name': 'unit.tests',
        'ttl': 3600,
    })
    provider._client.zone_records_get = Mock(return_value=[
        {
            'type': 'A',
            'id': 'one',
            'created': '0000-00-00T00:00:00Z',
            'modified': '0000-00-00T00:00:00Z',
            'zone_id': 'unit.tests',
            'name': 'www',
            'value': '1.2.3.4',
            'ttl': 300,
        },
        {
            'type': 'A',
            'id': 'two',
            'created': '0000-00-00T00:00:00Z',
            'modified': '0000-00-00T00:00:00Z',
            'zone_id': 'unit.tests',
            'name': 'www',
            'value': '2.2.3.4',
            'ttl': 300,
        },
        {
            'type': 'A',
            'id': 'three',
            'created': '0000-00-00T00:00:00Z',
            'modified': '0000-00-00T00:00:00Z',
            'zone_id': 'unit.tests',
            'name': 'ttl',
            'value': '3.2.3.4',
            'ttl': 600,
        },
    ])
    # Domain exists, we don't care about return
    resp.json.side_effect = ['{}']

    # Desired state: only the 'ttl' record, with a changed TTL -- so
    # 'ttl' becomes an Update and 'www' a Delete (2 changes).
    wanted = Zone('unit.tests.', [])
    wanted.add_record(Record.new(wanted, 'ttl', {
        'ttl': 300,
        'type': 'A',
        'value': '3.2.3.4',
    }))

    plan = provider.plan(wanted)
    self.assertTrue(plan.exists)
    self.assertEquals(2, len(plan.changes))
    self.assertEquals(2, provider.apply(plan))
    # recreate for update, and delete for the 2 parts of the other
    provider._client._do.assert_has_calls([
        call('POST', '/records', data={
            'name': 'ttl',
            'ttl': 300,
            'type': 'A',
            'value': '3.2.3.4',
            'zone_id': 'unit.tests',
        }),
        call('DELETE', '/records/one'),
        call('DELETE', '/records/two'),
        call('DELETE', '/records/three'),
    ], any_order=True)

+ 272
- 11
tests/test_octodns_provider_ns1.py View File

@ -578,6 +578,34 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {}, 'meta': {},
}) })
def cname_record(self):
return Record.new(self.zone, 'foo', {
'dynamic': {
'pools': {
'iad': {
'values': [{
'value': 'iad.unit.tests.',
}],
},
},
'rules': [{
'pool': 'iad',
}],
},
'octodns': {
'healthcheck': {
'host': 'send.me',
'path': '/_ping',
'port': 80,
'protocol': 'HTTP',
}
},
'ttl': 33,
'type': 'CNAME',
'value': 'value.unit.tests.',
'meta': {},
})
def test_notes(self): def test_notes(self):
provider = Ns1Provider('test', 'api-key') provider = Ns1Provider('test', 'api-key')
@ -609,6 +637,12 @@ class TestNs1ProviderDynamic(TestCase):
}, },
'notes': 'host:unit.tests type:A', 'notes': 'host:unit.tests type:A',
} }
monitor_five = {
'config': {
'host': 'iad.unit.tests',
},
'notes': 'host:foo.unit.tests type:CNAME',
}
provider._client._monitors_cache = { provider._client._monitors_cache = {
'one': monitor_one, 'one': monitor_one,
'two': { 'two': {
@ -624,6 +658,7 @@ class TestNs1ProviderDynamic(TestCase):
'notes': 'host:other.unit.tests type:A', 'notes': 'host:other.unit.tests type:A',
}, },
'four': monitor_four, 'four': monitor_four,
'five': monitor_five,
} }
# Would match, but won't get there b/c it's not dynamic # Would match, but won't get there b/c it's not dynamic
@ -641,6 +676,11 @@ class TestNs1ProviderDynamic(TestCase):
'2.3.4.5': monitor_four, '2.3.4.5': monitor_four,
}, provider._monitors_for(self.record())) }, provider._monitors_for(self.record()))
# Check match for CNAME values
self.assertEquals({
'iad.unit.tests.': monitor_five,
}, provider._monitors_for(self.cname_record()))
def test_uuid(self): def test_uuid(self):
# Just a smoke test/for coverage # Just a smoke test/for coverage
provider = Ns1Provider('test', 'api-key') provider = Ns1Provider('test', 'api-key')
@ -717,6 +757,10 @@ class TestNs1ProviderDynamic(TestCase):
self.assertFalse(monitor['config']['ssl']) self.assertFalse(monitor['config']['ssl'])
self.assertEquals('host:unit.tests type:A', monitor['notes']) self.assertEquals('host:unit.tests type:A', monitor['notes'])
record._octodns['healthcheck']['host'] = None
monitor = provider._monitor_gen(record, value)
self.assertTrue(r'\nHost: 3.4.5.6\r' in monitor['config']['send'])
record._octodns['healthcheck']['protocol'] = 'HTTPS' record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(record, value) monitor = provider._monitor_gen(record, value)
self.assertTrue(monitor['config']['ssl']) self.assertTrue(monitor['config']['ssl'])
@ -728,6 +772,14 @@ class TestNs1ProviderDynamic(TestCase):
# No http response expected # No http response expected
self.assertFalse('rules' in monitor) self.assertFalse('rules' in monitor)
def test_monitor_gen_CNAME(self):
provider = Ns1Provider('test', 'api-key')
value = 'iad.unit.tests.'
record = self.cname_record()
monitor = provider._monitor_gen(record, value)
self.assertEquals(value[:-1], monitor['config']['host'])
def test_monitor_is_match(self): def test_monitor_is_match(self):
provider = Ns1Provider('test', 'api-key') provider = Ns1Provider('test', 'api-key')
@ -1117,14 +1169,21 @@ class TestNs1ProviderDynamic(TestCase):
# finally has a catchall. Those are examples of the two ways pools get # finally has a catchall. Those are examples of the two ways pools get
# expanded. # expanded.
# #
# lhr splits in two, with a region and country.
# lhr splits in two, with a region and country and includes a fallback
#
# All values now include their own `pool:` name
# #
# well as both lhr georegion (for contients) and country. The first is # well as both lhr georegion (for contients) and country. The first is
# an example of a repeated target pool in a rule (only allowed when the # an example of a repeated target pool in a rule (only allowed when the
# 2nd is a catchall.) # 2nd is a catchall.)
self.assertEquals(['from:--default--', 'from:iad__catchall',
'from:iad__country', 'from:iad__georegion',
'from:lhr__country', 'from:lhr__georegion'],
self.assertEquals(['fallback: from:iad__catchall pool:iad',
'fallback: from:iad__country pool:iad',
'fallback: from:iad__georegion pool:iad',
'fallback: from:lhr__country pool:iad',
'fallback: from:lhr__georegion pool:iad',
'fallback:iad from:lhr__country pool:lhr',
'fallback:iad from:lhr__georegion pool:lhr',
'from:--default--'],
sorted(notes.keys())) sorted(notes.keys()))
# All the iad's should match (after meta and region were removed) # All the iad's should match (after meta and region were removed)
@ -1242,7 +1301,7 @@ class TestNs1ProviderDynamic(TestCase):
('mid-2', 'fid-2'), ('mid-2', 'fid-2'),
('mid-3', 'fid-3'), ('mid-3', 'fid-3'),
] ]
# This indirectly calls into _params_for_dynamic_A and tests the
# This indirectly calls into _params_for_dynamic and tests the
# handling to get there # handling to get there
record = self.record() record = self.record()
ret, _ = provider._params_for_A(record) ret, _ = provider._params_for_A(record)
@ -1270,7 +1329,39 @@ class TestNs1ProviderDynamic(TestCase):
params, _ = provider._params_for_geo_A(record) params, _ = provider._params_for_geo_A(record)
self.assertEquals([], params['filters']) self.assertEquals([], params['filters'])
def test_data_for_dynamic_A(self):
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_CNAME(self, monitors_for_mock,
monitor_sync_mock):
provider = Ns1Provider('test', 'api-key')
# pre-fill caches to avoid extranious calls (things we're testing
# elsewhere)
provider._client._datasource_id = 'foo'
provider._client._feeds_for_monitors = {
'mon-id': 'feed-id',
}
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
'iad.unit.tests.': 'mid-1',
}]
monitor_sync_mock.side_effect = [
('mid-1', 'fid-1'),
]
record = self.cname_record()
ret, _ = provider._params_for_CNAME(record)
# Check if the default value was correctly read and populated
# All other dynamic record test cases are covered by dynamic_A tests
self.assertEquals(ret['answers'][-1]['answer'][0], 'value.unit.tests.')
def test_data_for_dynamic(self):
provider = Ns1Provider('test', 'api-key') provider = Ns1Provider('test', 'api-key')
# Unexpected filters throws an error # Unexpected filters throws an error
@ -1279,7 +1370,7 @@ class TestNs1ProviderDynamic(TestCase):
'filters': [], 'filters': [],
} }
with self.assertRaises(Ns1Exception) as ctx: with self.assertRaises(Ns1Exception) as ctx:
provider._data_for_dynamic_A('A', ns1_record)
provider._data_for_dynamic('A', ns1_record)
self.assertEquals('Unrecognized advanced record', self.assertEquals('Unrecognized advanced record',
text_type(ctx.exception)) text_type(ctx.exception))
@ -1291,7 +1382,7 @@ class TestNs1ProviderDynamic(TestCase):
'regions': {}, 'regions': {},
'ttl': 42, 'ttl': 42,
} }
data = provider._data_for_dynamic_A('A', ns1_record)
data = provider._data_for_dynamic('A', ns1_record)
self.assertEquals({ self.assertEquals({
'dynamic': { 'dynamic': {
'pools': {}, 'pools': {},
@ -1396,7 +1487,7 @@ class TestNs1ProviderDynamic(TestCase):
'tier': 3, 'tier': 3,
'ttl': 42, 'ttl': 42,
} }
data = provider._data_for_dynamic_A('A', ns1_record)
data = provider._data_for_dynamic('A', ns1_record)
self.assertEquals({ self.assertEquals({
'dynamic': { 'dynamic': {
'pools': { 'pools': {
@ -1440,7 +1531,7 @@ class TestNs1ProviderDynamic(TestCase):
}, data) }, data)
# Same answer if we go through _data_for_A which out sources the job to # Same answer if we go through _data_for_A which out sources the job to
# _data_for_dynamic_A
# _data_for_dynamic
data2 = provider._data_for_A('A', ns1_record) data2 = provider._data_for_A('A', ns1_record)
self.assertEquals(data, data2) self.assertEquals(data, data2)
@ -1451,7 +1542,7 @@ class TestNs1ProviderDynamic(TestCase):
ns1_record['regions'][old_style_catchall_pool_name] = \ ns1_record['regions'][old_style_catchall_pool_name] = \
ns1_record['regions'][catchall_pool_name] ns1_record['regions'][catchall_pool_name]
del ns1_record['regions'][catchall_pool_name] del ns1_record['regions'][catchall_pool_name]
data3 = provider._data_for_dynamic_A('A', ns1_record)
data3 = provider._data_for_dynamic('A', ns1_record)
self.assertEquals(data, data2) self.assertEquals(data, data2)
# Oceania test cases # Oceania test cases
@ -1471,6 +1562,176 @@ class TestNs1ProviderDynamic(TestCase):
self.assertTrue( self.assertTrue(
'OC-{}'.format(c) in data4['dynamic']['rules'][0]['geos']) 'OC-{}'.format(c) in data4['dynamic']['rules'][0]['geos'])
# Test out fallback only pools and new-style notes
ns1_record = {
'answers': [{
'answer': ['1.1.1.1'],
'meta': {
'priority': 1,
'note': 'from:one__country pool:one fallback:two',
},
'region': 'one_country',
}, {
'answer': ['2.2.2.2'],
'meta': {
'priority': 2,
'note': 'from:one__country pool:two fallback:three',
},
'region': 'one_country',
}, {
'answer': ['3.3.3.3'],
'meta': {
'priority': 3,
'note': 'from:one__country pool:three fallback:',
},
'region': 'one_country',
}, {
'answer': ['5.5.5.5'],
'meta': {
'priority': 4,
'note': 'from:--default--',
},
'region': 'one_country',
}, {
'answer': ['4.4.4.4'],
'meta': {
'priority': 1,
'note': 'from:four__country pool:four fallback:',
},
'region': 'four_country',
}, {
'answer': ['5.5.5.5'],
'meta': {
'priority': 2,
'note': 'from:--default--',
},
'region': 'four_country',
}],
'domain': 'unit.tests',
'filters': filters,
'regions': {
'one__country': {
'meta': {
'note': 'rule-order:1 fallback:two',
'country': ['CA'],
'us_state': ['OR'],
},
},
'four__country': {
'meta': {
'note': 'rule-order:2',
'country': ['CA'],
'us_state': ['OR'],
},
},
catchall_pool_name: {
'meta': {
'note': 'rule-order:3',
},
}
},
'tier': 3,
'ttl': 42,
}
data = provider._data_for_dynamic('A', ns1_record)
self.assertEquals({
'dynamic': {
'pools': {
'four': {
'fallback': None,
'values': [{'value': '4.4.4.4', 'weight': 1}]
},
'one': {
'fallback': 'two',
'values': [{'value': '1.1.1.1', 'weight': 1}]
},
'three': {
'fallback': None,
'values': [{'value': '3.3.3.3', 'weight': 1}]
},
'two': {
'fallback': 'three',
'values': [{'value': '2.2.2.2', 'weight': 1}]
},
},
'rules': [{
'_order': '1',
'geos': ['NA-CA', 'NA-US-OR'],
'pool': 'one'
}, {
'_order': '2',
'geos': ['NA-CA', 'NA-US-OR'],
'pool': 'four'
}, {
'_order': '3', 'pool': 'iad'}
]
},
'ttl': 42,
'type': 'A',
'values': ['5.5.5.5']
}, data)
def test_data_for_dynamic_CNAME(self):
provider = Ns1Provider('test', 'api-key')
# Test out a small setup that just covers default value validation
# Everything else is same as dynamic A whose tests will cover all
# other options and test cases
# Not testing for geo/region specific cases
filters = provider._get_updated_filter_chain(False, False)
catchall_pool_name = 'iad__catchall'
ns1_record = {
'answers': [{
'answer': ['iad.unit.tests.'],
'meta': {
'priority': 1,
'weight': 12,
'note': 'from:{}'.format(catchall_pool_name),
},
'region': catchall_pool_name,
}, {
'answer': ['value.unit.tests.'],
'meta': {
'priority': 2,
'note': 'from:--default--',
},
'region': catchall_pool_name,
}],
'domain': 'foo.unit.tests',
'filters': filters,
'regions': {
catchall_pool_name: {
'meta': {
'note': 'rule-order:1',
},
}
},
'tier': 3,
'ttl': 43,
'type': 'CNAME',
}
data = provider._data_for_CNAME('CNAME', ns1_record)
self.assertEquals({
'dynamic': {
'pools': {
'iad': {
'fallback': None,
'values': [{
'value': 'iad.unit.tests.',
'weight': 12,
}],
},
},
'rules': [{
'_order': '1',
'pool': 'iad',
}],
},
'ttl': 43,
'type': 'CNAME',
'value': 'value.unit.tests.',
}, data)
@patch('ns1.rest.records.Records.retrieve') @patch('ns1.rest.records.Records.retrieve')
@patch('ns1.rest.zones.Zones.retrieve') @patch('ns1.rest.zones.Zones.retrieve')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for') @patch('octodns.provider.ns1.Ns1Provider._monitors_for')


+ 187
- 0
tests/test_octodns_provider_route53.py View File

@ -1166,6 +1166,31 @@ class TestRoute53Provider(TestCase):
}) })
stubber.add_response('change_tags_for_resource', {}) stubber.add_response('change_tags_for_resource', {})
health_check_config = {
'EnableSNI': False,
'FailureThreshold': 6,
'FullyQualifiedDomainName': '4.2.3.4',
'IPAddress': '4.2.3.4',
'MeasureLatency': True,
'Port': 8080,
'RequestInterval': 10,
'ResourcePath': '/_status',
'Type': 'HTTP'
}
stubber.add_response('create_health_check', {
'HealthCheck': {
'Id': '43',
'CallerReference': self.caller_ref,
'HealthCheckConfig': health_check_config,
'HealthCheckVersion': 1,
},
'Location': 'http://url',
}, {
'CallerReference': ANY,
'HealthCheckConfig': health_check_config,
})
stubber.add_response('change_tags_for_resource', {})
record = Record.new(self.expected, '', { record = Record.new(self.expected, '', {
'ttl': 61, 'ttl': 61,
'type': 'A', 'type': 'A',
@ -1191,6 +1216,11 @@ class TestRoute53Provider(TestCase):
# when allowed to create we do # when allowed to create we do
id = provider.get_health_check_id(record, value, True) id = provider.get_health_check_id(record, value, True)
self.assertEquals('42', id) self.assertEquals('42', id)
# when allowed to create and when host is None
record._octodns['healthcheck']['host'] = None
id = provider.get_health_check_id(record, value, True)
self.assertEquals('43', id)
stubber.assert_no_pending_responses() stubber.assert_no_pending_responses()
# A CNAME style healthcheck, without a value # A CNAME style healthcheck, without a value
@ -1962,6 +1992,163 @@ class TestRoute53Provider(TestCase):
self.assertEquals(1, len(extra)) self.assertEquals(1, len(extra))
stubber.assert_no_pending_responses() stubber.assert_no_pending_responses()
def test_extra_change_dynamic_has_health_check_cname(self):
provider, stubber = self._get_stubbed_provider()
list_hosted_zones_resp = {
'HostedZones': [{
'Name': 'unit.tests.',
'Id': 'z42',
'CallerReference': 'abc',
}],
'Marker': 'm',
'IsTruncated': False,
'MaxItems': '100',
}
stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {})
# record with geo and no health check returns change
desired = Zone('unit.tests.', [])
record = Record.new(desired, 'cname', {
'ttl': 30,
'type': 'CNAME',
'value': 'cname.unit.tests.',
'dynamic': {
'pools': {
'one': {
'values': [{
'value': 'one.cname.unit.tests.',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
})
desired.add_record(record)
list_resource_record_sets_resp = {
'ResourceRecordSets': [{
# Not dynamic value and other name
'Name': 'unit.tests.',
'Type': 'CNAME',
'GeoLocation': {
'CountryCode': '*',
},
'ResourceRecords': [{
'Value': 'cname.unit.tests.',
}],
'TTL': 61,
# All the non-matches have a different Id so we'll fail if they
# match
'HealthCheckId': '33',
}, {
# Not dynamic value, matching name, other type
'Name': 'cname.unit.tests.',
'Type': 'AAAA',
'ResourceRecords': [{
'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
}],
'TTL': 61,
'HealthCheckId': '33',
}, {
# default value pool
'Name': '_octodns-default-value.cname.unit.tests.',
'Type': 'CNAME',
'GeoLocation': {
'CountryCode': '*',
},
'ResourceRecords': [{
'Value': 'cname.unit.tests.',
}],
'TTL': 61,
'HealthCheckId': '33',
}, {
# different record
'Name': '_octodns-two-value.other.unit.tests.',
'Type': 'CNAME',
'GeoLocation': {
'CountryCode': '*',
},
'ResourceRecords': [{
'Value': 'cname.unit.tests.',
}],
'TTL': 61,
'HealthCheckId': '33',
}, {
# same everything, but different type
'Name': '_octodns-one-value.cname.unit.tests.',
'Type': 'AAAA',
'ResourceRecords': [{
'Value': '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
}],
'TTL': 61,
'HealthCheckId': '33',
}, {
# same everything, sub
'Name': '_octodns-one-value.sub.cname.unit.tests.',
'Type': 'CNAME',
'ResourceRecords': [{
'Value': 'cname.unit.tests.',
}],
'TTL': 61,
'HealthCheckId': '33',
}, {
# match
'Name': '_octodns-one-value.cname.unit.tests.',
'Type': 'CNAME',
'ResourceRecords': [{
'Value': 'one.cname.unit.tests.',
}],
'TTL': 61,
'HealthCheckId': '42',
}],
'IsTruncated': False,
'MaxItems': '100',
}
stubber.add_response('list_resource_record_sets',
list_resource_record_sets_resp,
{'HostedZoneId': 'z42'})
stubber.add_response('list_health_checks', {
'HealthChecks': [{
'Id': '42',
'CallerReference': self.caller_ref,
'HealthCheckConfig': {
'Type': 'HTTPS',
'FullyQualifiedDomainName': 'one.cname.unit.tests.',
'ResourcePath': '/_dns',
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],
'IsTruncated': False,
'MaxItems': '100',
'Marker': '',
})
extra = provider._extra_changes(desired=desired, changes=[])
self.assertEquals(0, len(extra))
stubber.assert_no_pending_responses()
# change b/c of healthcheck path
record._octodns['healthcheck'] = {
'path': '/_ready'
}
extra = provider._extra_changes(desired=desired, changes=[])
self.assertEquals(1, len(extra))
stubber.assert_no_pending_responses()
# no change b/c healthcheck host ignored for dynamic cname
record._octodns['healthcheck'] = {
'host': 'foo.bar.io'
}
extra = provider._extra_changes(desired=desired, changes=[])
self.assertEquals(0, len(extra))
stubber.assert_no_pending_responses()
def _get_test_plan(self, max_changes): def _get_test_plan(self, max_changes):
provider = Route53Provider('test', 'abc', '123', max_changes) provider = Route53Provider('test', 'abc', '123', max_changes)


+ 6
- 5
tests/test_octodns_provider_transip.py View File

@ -56,10 +56,11 @@ class MockDomainService(DomainService):
_dns_entries.extend(entries_for(name, record)) _dns_entries.extend(entries_for(name, record))
# NS is not supported as a DNS Entry,
# so it should cover the if statement
# Add a non-supported type
# so it triggers the "is supported" (transip.py:115) check and
# give 100% code coverage
_dns_entries.append( _dns_entries.append(
DnsEntry('@', '3600', 'NS', 'ns01.transip.nl.'))
DnsEntry('@', '3600', 'BOGUS', 'ns01.transip.nl.'))
self.mockupEntries = _dns_entries self.mockupEntries = _dns_entries
@ -222,7 +223,7 @@ N4OiVz1I3rbZGYa396lpxO6ku8yCglisL1yrSP6DdEUp66ntpKVd
provider._client = MockDomainService('unittest', self.bogus_key) provider._client = MockDomainService('unittest', self.bogus_key)
plan = provider.plan(_expected) plan = provider.plan(_expected)
self.assertEqual(14, plan.change_counts['Create'])
self.assertEqual(15, plan.change_counts['Create'])
self.assertEqual(0, plan.change_counts['Update']) self.assertEqual(0, plan.change_counts['Update'])
self.assertEqual(0, plan.change_counts['Delete']) self.assertEqual(0, plan.change_counts['Delete'])
@ -235,7 +236,7 @@ N4OiVz1I3rbZGYa396lpxO6ku8yCglisL1yrSP6DdEUp66ntpKVd
provider = TransipProvider('test', 'unittest', self.bogus_key) provider = TransipProvider('test', 'unittest', self.bogus_key)
provider._client = MockDomainService('unittest', self.bogus_key) provider._client = MockDomainService('unittest', self.bogus_key)
plan = provider.plan(_expected) plan = provider.plan(_expected)
self.assertEqual(14, len(plan.changes))
self.assertEqual(15, len(plan.changes))
changes = provider.apply(plan) changes = provider.apply(plan)
self.assertEqual(changes, len(plan.changes)) self.assertEqual(changes, len(plan.changes))


+ 12
- 3
tests/test_octodns_provider_ultra.py View File

@ -274,7 +274,7 @@ class TestUltraProvider(TestCase):
self.assertTrue(provider.populate(zone)) self.assertTrue(provider.populate(zone))
self.assertEquals('octodns1.test.', zone.name) self.assertEquals('octodns1.test.', zone.name)
self.assertEquals(11, len(zone.records))
self.assertEquals(12, len(zone.records))
self.assertEquals(4, mock.call_count) self.assertEquals(4, mock.call_count)
def test_apply(self): def test_apply(self):
@ -352,8 +352,8 @@ class TestUltraProvider(TestCase):
})) }))
plan = provider.plan(wanted) plan = provider.plan(wanted)
self.assertEquals(10, len(plan.changes))
self.assertEquals(10, provider.apply(plan))
self.assertEquals(11, len(plan.changes))
self.assertEquals(11, provider.apply(plan))
self.assertTrue(plan.exists) self.assertTrue(plan.exists)
provider._request.assert_has_calls([ provider._request.assert_has_calls([
@ -492,6 +492,15 @@ class TestUltraProvider(TestCase):
Record.new(zone, 'txt', Record.new(zone, 'txt',
{'ttl': 60, 'type': 'TXT', {'ttl': 60, 'type': 'TXT',
'values': ['abc', 'def']})), 'values': ['abc', 'def']})),
# ALIAS
('', 'ALIAS',
'/v2/zones/unit.tests./rrsets/APEXALIAS/unit.tests.',
{'ttl': 60, 'rdata': ['target.unit.tests.']},
Record.new(zone, '',
{'ttl': 60, 'type': 'ALIAS',
'value': 'target.unit.tests.'})),
): ):
# Validate path and payload based on record meet expectations # Validate path and payload based on record meet expectations
path, payload = provider._gen_data(expected_record) path, payload = provider._gen_data(expected_record)


+ 9
- 5
tests/test_octodns_provider_yaml.py View File

@ -38,7 +38,7 @@ class TestYamlProvider(TestCase):
self.assertEquals(22, len(zone.records)) self.assertEquals(22, len(zone.records))
source.populate(dynamic_zone) source.populate(dynamic_zone)
self.assertEquals(5, len(dynamic_zone.records))
self.assertEquals(6, len(dynamic_zone.records))
# Assumption here is that a clean round-trip means that everything # Assumption here is that a clean round-trip means that everything
# worked as expected, data that went in came back out and could be # worked as expected, data that went in came back out and could be
@ -68,11 +68,11 @@ class TestYamlProvider(TestCase):
# Dynamic plan # Dynamic plan
plan = target.plan(dynamic_zone) plan = target.plan(dynamic_zone)
self.assertEquals(5, len([c for c in plan.changes
self.assertEquals(6, len([c for c in plan.changes
if isinstance(c, Create)])) if isinstance(c, Create)]))
self.assertFalse(isfile(dynamic_yaml_file)) self.assertFalse(isfile(dynamic_yaml_file))
# Apply it # Apply it
self.assertEquals(5, target.apply(plan))
self.assertEquals(6, target.apply(plan))
self.assertTrue(isfile(dynamic_yaml_file)) self.assertTrue(isfile(dynamic_yaml_file))
# There should be no changes after the round trip # There should be no changes after the round trip
@ -148,6 +148,10 @@ class TestYamlProvider(TestCase):
self.assertTrue('value' in dyna) self.assertTrue('value' in dyna)
# self.assertTrue('dynamic' in dyna) # self.assertTrue('dynamic' in dyna)
dyna = data.pop('pool-only-in-fallback')
self.assertTrue('value' in dyna)
# self.assertTrue('dynamic' in dyna)
# make sure nothing is left # make sure nothing is left
self.assertEquals([], list(data.keys())) self.assertEquals([], list(data.keys()))
@ -397,7 +401,7 @@ class TestOverridingYamlProvider(TestCase):
# Load the base, should see the 5 records # Load the base, should see the 5 records
base.populate(zone) base.populate(zone)
got = {r.name: r for r in zone.records} got = {r.name: r for r in zone.records}
self.assertEquals(5, len(got))
self.assertEquals(6, len(got))
# We get the "dynamic" A from the base config # We get the "dynamic" A from the base config
self.assertTrue('dynamic' in got['a'].data) self.assertTrue('dynamic' in got['a'].data)
# No added # No added
@ -406,7 +410,7 @@ class TestOverridingYamlProvider(TestCase):
# Load the overrides, should replace one and add 1 # Load the overrides, should replace one and add 1
override.populate(zone) override.populate(zone)
got = {r.name: r for r in zone.records} got = {r.name: r for r in zone.records}
self.assertEquals(6, len(got))
self.assertEquals(7, len(got))
# 'a' was replaced with a generic record # 'a' was replaced with a generic record
self.assertEquals({ self.assertEquals({
'ttl': 3600, 'ttl': 3600,


+ 49
- 5
tests/test_octodns_record.py View File

@ -1015,17 +1015,33 @@ class TestRecord(TestCase):
} }
}) })
self.assertEquals('/_ready', new.healthcheck_path) self.assertEquals('/_ready', new.healthcheck_path)
self.assertEquals('bleep.bloop', new.healthcheck_host)
self.assertEquals('bleep.bloop', new.healthcheck_host())
self.assertEquals('HTTP', new.healthcheck_protocol) self.assertEquals('HTTP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port) self.assertEquals(8080, new.healthcheck_port)
# empty host value in healthcheck
new = Record.new(self.zone, 'a', {
'ttl': 44,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
'path': '/_ready',
'host': None,
'protocol': 'HTTP',
'port': 8080,
}
}
})
self.assertEquals('1.2.3.4', new.healthcheck_host(value="1.2.3.4"))
new = Record.new(self.zone, 'a', { new = Record.new(self.zone, 'a', {
'ttl': 44, 'ttl': 44,
'type': 'A', 'type': 'A',
'value': '1.2.3.4', 'value': '1.2.3.4',
}) })
self.assertEquals('/_dns', new.healthcheck_path) self.assertEquals('/_dns', new.healthcheck_path)
self.assertEquals('a.unit.tests', new.healthcheck_host)
self.assertEquals('a.unit.tests', new.healthcheck_host())
self.assertEquals('HTTPS', new.healthcheck_protocol) self.assertEquals('HTTPS', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port) self.assertEquals(443, new.healthcheck_port)
@ -1044,7 +1060,7 @@ class TestRecord(TestCase):
} }
}) })
self.assertIsNone(new.healthcheck_path) self.assertIsNone(new.healthcheck_path)
self.assertIsNone(new.healthcheck_host)
self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol) self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(8080, new.healthcheck_port) self.assertEquals(8080, new.healthcheck_port)
@ -1059,7 +1075,7 @@ class TestRecord(TestCase):
} }
}) })
self.assertIsNone(new.healthcheck_path) self.assertIsNone(new.healthcheck_path)
self.assertIsNone(new.healthcheck_host)
self.assertIsNone(new.healthcheck_host())
self.assertEquals('TCP', new.healthcheck_protocol) self.assertEquals('TCP', new.healthcheck_protocol)
self.assertEquals(443, new.healthcheck_port) self.assertEquals(443, new.healthcheck_port)
@ -3013,6 +3029,7 @@ class TestDynamicRecords(TestCase):
'pools': { 'pools': {
'one': { 'one': {
'values': [{ 'values': [{
'weight': 10,
'value': '3.3.3.3', 'value': '3.3.3.3',
}], }],
}, },
@ -3412,7 +3429,7 @@ class TestDynamicRecords(TestCase):
self.assertEquals(['pool "one" is missing values'], self.assertEquals(['pool "one" is missing values'],
ctx.exception.reasons) ctx.exception.reasons)
# pool valu not a dict
# pool value not a dict
a_data = { a_data = {
'dynamic': { 'dynamic': {
'pools': { 'pools': {
@ -3596,6 +3613,33 @@ class TestDynamicRecords(TestCase):
self.assertEquals(['invalid weight "foo" in pool "three" value 2'], self.assertEquals(['invalid weight "foo" in pool "three" value 2'],
ctx.exception.reasons) ctx.exception.reasons)
# single value with weight!=1
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'weight': 12,
'value': '6.6.6.6',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['pool "one" has single value with weight!=1'],
ctx.exception.reasons)
# invalid fallback # invalid fallback
a_data = { a_data = {
'dynamic': { 'dynamic': {


Loading…
Cancel
Save