
Merge pull request #1 from github/initial-import

Initial import of OSS OctoDNS
Ross McFarland authored 9 years ago, committed by GitHub · commit 3a02a76d31
81 changed files with 15272 additions and 0 deletions
  1. +11 -0  .git_hooks_pre-commit
  2. +53 -0  .github/CONTRIBUTING.md
  3. +11 -0  .gitignore
  4. +7 -0  .travis.yml
  5. +22 -0  LICENSE
  6. +219 -0  README.md
  7. BIN  docs/assets/deploy.png
  8. BIN  docs/assets/noop.png
  9. BIN  docs/assets/pr.png
  10. +8 -0  octodns/__init__.py
  11. +6 -0  octodns/cmds/__init__.py
  12. +69 -0  octodns/cmds/args.py
  13. +29 -0  octodns/cmds/compare.py
  14. +25 -0  octodns/cmds/dump.py
  15. +99 -0  octodns/cmds/report.py
  16. +37 -0  octodns/cmds/sync.py
  17. +22 -0  octodns/cmds/validate.py
  18. +309 -0  octodns/manager.py
  19. +6 -0  octodns/provider/__init__.py
  20. +116 -0  octodns/provider/base.py
  21. +249 -0  octodns/provider/cloudflare.py
  22. +349 -0  octodns/provider/dnsimple.py
  23. +651 -0  octodns/provider/dyn.py
  24. +361 -0  octodns/provider/powerdns.py
  25. +651 -0  octodns/provider/route53.py
  26. +82 -0  octodns/provider/yaml.py
  27. +549 -0  octodns/record.py
  28. +6 -0  octodns/source/__init__.py
  29. +33 -0  octodns/source/base.py
  30. +208 -0  octodns/source/tinydns.py
  31. +79 -0  octodns/yaml.py
  32. +117 -0  octodns/zone.py
  33. +6 -0  requirements-dev.txt
  34. +17 -0  requirements.txt
  35. +35 -0  script/bootstrap
  36. +30 -0  script/cibuild
  37. +30 -0  script/coverage
  38. +21 -0  script/lint
  39. +15 -0  script/sdist
  40. +28 -0  script/test
  41. +46 -0  setup.py
  42. +4 -0  tests/config/bad-provider-class-module.yaml
  43. +4 -0  tests/config/bad-provider-class-no-module.yaml
  44. +4 -0  tests/config/bad-provider-class.yaml
  45. +1 -0  tests/config/empty.yaml
  46. +3 -0  tests/config/missing-provider-class.yaml
  47. +4 -0  tests/config/missing-provider-config.yaml
  48. +6 -0  tests/config/missing-provider-env.yaml
  49. +3 -0  tests/config/missing-sources.yaml
  50. +13 -0  tests/config/no-dump.yaml
  51. +13 -0  tests/config/simple-validate.yaml
  52. +35 -0  tests/config/simple.yaml
  53. +10 -0  tests/config/subzone.unit.tests.yaml
  54. +108 -0  tests/config/unit.tests.yaml
  55. +28 -0  tests/config/unknown-provider.yaml
  56. +8 -0  tests/config/unordered.yaml
  57. +188 -0  tests/fixtures/cloudflare-dns_records-page-1.json
  58. +116 -0  tests/fixtures/cloudflare-dns_records-page-2.json
  59. +140 -0  tests/fixtures/cloudflare-zones-page-1.json
  60. +140 -0  tests/fixtures/cloudflare-zones-page-2.json
  61. +106 -0  tests/fixtures/dnsimple-invalid-content.json
  62. +314 -0  tests/fixtures/dnsimple-page-1.json
  63. +138 -0  tests/fixtures/dnsimple-page-2.json
  64. +4190 -0  tests/fixtures/dyn-traffic-director-get.json
  65. +235 -0  tests/fixtures/powerdns-full-data.json
  66. +69 -0  tests/helpers.py
  67. +203 -0  tests/test_octodns_manager.py
  68. +170 -0  tests/test_octodns_provider_base.py
  69. +273 -0  tests/test_octodns_provider_cloudflare.py
  70. +202 -0  tests/test_octodns_provider_dnsimple.py
  71. +1155 -0  tests/test_octodns_provider_dyn.py
  72. +290 -0  tests/test_octodns_provider_powerdns.py
  73. +1145 -0  tests/test_octodns_provider_route53.py
  74. +111 -0  tests/test_octodns_provider_yaml.py
  75. +765 -0  tests/test_octodns_record.py
  76. +176 -0  tests/test_octodns_source_tinydns.py
  77. +61 -0  tests/test_octodns_yaml.py
  78. +174 -0  tests/test_octodns_zone.py
  79. BIN  tests/zones/.is-needed-for-tests
  80. +48 -0  tests/zones/example.com
  81. +7 -0  tests/zones/other.foo

+11 -0   .git_hooks_pre-commit

@@ -0,0 +1,11 @@
#!/bin/sh
set -e
HOOKS=`dirname $0`
GIT=`dirname $HOOKS`
ROOT=`dirname $GIT`
source $ROOT/env/bin/activate
$ROOT/script/lint
$ROOT/script/test

+53 -0   .github/CONTRIBUTING.md

@@ -0,0 +1,53 @@
# Contributing
Hi there! We're thrilled that you'd like to contribute to OctoDNS. Your help is essential for keeping it great.
Please note that this project adheres to the [Open Code of Conduct](http://todogroup.org/opencodeofconduct/#GitHub%20OctoDNS/opensource@github.com). By participating in this project you agree to abide by its terms.
If you have questions, or you'd like to check with us before embarking on a major development effort, please [open an issue](https://github.com/github/octodns/issues/new).
## How to contribute
This project uses the [GitHub Flow](https://guides.github.com/introduction/flow/). That means that the `master` branch is stable and new development is done in feature branches. Feature branches are merged into the `master` branch via a Pull Request.
0. Fork and clone the repository
0. Configure and install the dependencies: `script/bootstrap`
0. Make sure the tests pass on your machine: `script/test`
0. Create a new branch: `git checkout -b my-branch-name`
0. Make your change, add tests, and make sure the tests still pass
0. Make sure that `./script/lint` passes without any warnings
0. Make sure that coverage is at :100:%: run `script/coverage` and open `htmlcov/index.html`
* You can open PRs for :eyes: & discussion prior to this
0. Push to your fork and submit a pull request
We will handle updating the version, tagging the release, and publishing the package. Please don't bump the version or otherwise attempt to take on these administrative internal tasks as part of your pull request.
Here are a few things you can do that will increase the likelihood of your pull request being accepted:
* Follow [pep8](https://www.python.org/dev/peps/pep-0008/)
- Write thorough tests. No PRs will be merged without :100:% code coverage. More than that, tests should be very thorough and cover as many (edge) cases as possible. We're working with DNS here and bugs can have a major impact, so we need to do as much as reasonably possible to ensure quality. While :100:% doesn't even begin to mean there are no bugs, getting there often requires close inspection & a relatively complete understanding of the code. More often than not the endeavor will uncover at least minor problems.
- Bug fixes require specific tests covering the addressed behavior.
- Write or update documentation. If you have added a feature or changed an existing one, please make appropriate changes to the docs. Doc-only PRs are always welcome.
- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- We target Python 2.7, but have taken steps to make Python 3 support as easy as possible when someone decides it's needed. PR welcome.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
## License note
We can only accept contributions that are compatible with the MIT license.
It's OK to depend on packages licensed under either Apache 2.0 or MIT, but we cannot add dependencies on any packages that are licensed under GPL.
Any contributions you make must be under the MIT license.
## Resources
- [Contributing to Open Source on GitHub](https://guides.github.com/activities/contributing-to-open-source/)
- [Using Pull Requests](https://help.github.com/articles/using-pull-requests/)
- [GitHub Help](https://help.github.com)

+11 -0   .gitignore

@@ -0,0 +1,11 @@
*.pyc
.coverage
.env
coverage.xml
dist/
env/
htmlcov/
nosetests.xml
octodns.egg-info/
output/
tmp/

+7 -0   .travis.yml

@@ -0,0 +1,7 @@
language: python
python:
- 2.7
script: ./script/cibuild
notifications:
email:
- ross@github.com

+22 -0   LICENSE

@@ -0,0 +1,22 @@
Copyright (c) 2017 GitHub, Inc.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

+219 -0   README.md

@@ -0,0 +1,219 @@
# OctoDNS
## DNS as code - Tools for managing DNS across multiple providers
In the vein of [infrastructure as
code](https://en.wikipedia.org/wiki/Infrastructure_as_Code) OctoDNS provides a set of tools & patterns that make it easy to manage your DNS records across multiple providers. The resulting config can live in a repository and be [deployed](https://github.com/blog/1241-deploying-at-github) just like the rest of your code, maintaining a clear history and using your existing review & workflow.
The architecture is pluggable and the tooling is flexible to make it applicable to a wide variety of use-cases. Effort has been made to make adding new providers as easy as possible. In the simplest case it involves writing a single `class` and a couple hundred lines of code, most of which is translating between the provider's schema and OctoDNS's. More on some of the ways we use it and how to go about extending it below and in the [/docs directory](/docs).
It is similar to [Netflix/denominator](https://github.com/Netflix/denominator).
## Getting started
### Workspace
Running through the following commands will install the latest release of OctoDNS and set up a place for your config files to live.
```
$ mkdir dns
$ cd dns
$ virtualenv env
...
$ source env/bin/activate
$ pip install octodns
$ mkdir config
```
### Config
We start by creating a config file to tell OctoDNS about our providers and the zone(s) we want it to manage. Below we're setting up a `YamlProvider` to source records from our config files and both a `Route53Provider` and `DynProvider` to serve as the targets for those records. You can have any number of zones set up and any number of sources of data and targets for records for each. You can also have multiple config files that make use of separate accounts and each manage a distinct set of zones. A good example of this might be `./config/staging.yaml` & `./config/production.yaml`. We'll focus on a `config/production.yaml`.
```yaml
---
providers:
  config:
    class: octodns.provider.yaml.YamlProvider
    directory: ./config
  dyn:
    class: octodns.provider.dyn.DynProvider
    customer: 1234
    username: 'username'
    password: env/DYN_PASSWORD
  route53:
    class: octodns.provider.route53.Route53Provider
    access_key_id: env/AWS_ACCESS_KEY_ID
    secret_access_key: env/AWS_SECRET_ACCESS_KEY
zones:
  example.com.:
    sources:
      - config
    targets:
      - dyn
      - route53
```
`class` is a special key that tells OctoDNS what python class should be loaded. Any other keys will be passed as configuration values to that provider. In general any sensitive or frequently rotated values should come from environment variables. When OctoDNS sees a value that starts with `env/` it will look for that value in the process's environment and pass the result along.
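For illustration, that substitution boils down to something like the sketch below. The real logic lives in `octodns/manager.py` (part of this import); `resolve_value` here is just a hypothetical helper name.

```python
from os import environ


def resolve_value(v):
    # Values that look like 'env/SOME_VAR' are swapped for the contents of
    # that environment variable; anything else is passed through untouched.
    if isinstance(v, str) and v.startswith('env/'):
        env_var = v[4:]
        try:
            return environ[env_var]
        except KeyError:
            raise Exception('Incorrect provider config, '
                            'missing env var {}'.format(env_var))
    return v


# e.g. with DYN_PASSWORD=hunter2 exported:
#   resolve_value('env/DYN_PASSWORD')  ->  'hunter2'
#   resolve_value(1234)                ->  1234
```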
Now that we have something to tell OctoDNS about our providers & zones we need to tell it about our records. We'll keep it simple for now and just create a single `A` record at the top-level of the domain.
`config/example.com.yaml`
```yaml
---
'':
  ttl: 60
  type: A
  values:
    - 1.2.3.4
    - 1.2.3.5
```
### Noop
We're ready to do a dry-run with our new setup to see what changes it would make. Since we're pretending here we'll act like there are no existing records for `example.com.` in our accounts on either provider.
```
$ octodns-sync --config-file=./config/production.yaml
...
********************************************************************************
* example.com.
********************************************************************************
* route53 (Route53Provider)
* Create <ARecord A 60, example.com., [u'1.2.3.4', '1.2.3.5']>
* Summary: Creates=1, Updates=0, Deletes=0, Existing Records=0
* dyn (DynProvider)
* Create <ARecord A 60, example.com., [u'1.2.3.4', '1.2.3.5']>
* Summary: Creates=1, Updates=0, Deletes=0, Existing Records=0
********************************************************************************
...
```
There will be other logging information presented on the screen, but successful runs of sync will always end with a summary like the above for any providers & zones with changes. If there are no changes a message saying so will be printed instead. Above we're creating a new zone in both providers so they show the same change, but that doesn't always have to be the case. If one of them had started out in a different state you would see the changes OctoDNS intends to make to sync them up.
### Making changes
**WARNING**: OctoDNS assumes ownership of any domain you point it to. When you tell it to act it will do whatever is necessary to try and match up states, including deleting any unexpected records. Be careful when playing around with OctoDNS. It's best to experiment with a fake zone or one without any data that matters until you're comfortable with the system.
Now it's time to tell OctoDNS to make things happen. We'll invoke it again with the same options and add a `--doit` on the end to tell it this time we actually want it to try and make the specified changes.
```
$ octodns-sync --config-file=./config/production.yaml --doit
...
```
The output here would be the same as before with a few more log lines at the end as it makes the actual changes. After which the config in Route53 and Dyn should match what's in the yaml file.
### Workflow
In the above case we manually ran OctoDNS from the command line. That works and it's better than heading into the provider GUIs and making changes by clicking around, but OctoDNS is designed to be run as part of a deploy process. The implementation details are well beyond the scope of this README, but here is an example of the workflow we use at GitHub. It follows the way [GitHub itself is branch deployed](https://githubengineering.com/deploying-branches-to-github-com/).
The first step is to create a PR with your changes.
![](/docs/assets/pr.png)
Assuming the code tests and config validation statuses are green the next step is to do a noop deploy and verify that the changes OctoDNS plans to make are the ones you expect.
![](/docs/assets/noop.png)
After that comes a set of reviews. One from a teammate who should have full context on what you're trying to accomplish and visibility into the changes you're making to do it. The other is from a member of the team here at GitHub that owns DNS, mostly as a sanity check and to make sure that best practices are being followed. As much of that as possible is baked into `octodns-validate`.
After the reviews it's time to branch deploy the change.
![](/docs/assets/deploy.png)
If that goes smoothly, you again see the expected changes, and you've verified them with `dig` and/or `octodns-report`, you're good to hit the merge button. If there are problems you can quickly do a `.deploy dns/master` to go back to the previous state.
### Bootstrapping config files
Very few situations will involve starting with a blank slate, which is why there's tooling built in to pull existing data out of providers into a matching config file.
```
$ octodns-dump --config-file=config/production.yaml --output-dir=tmp/ example.com. route53
2017-03-15T13:33:34 INFO Manager __init__: config_file=tmp/production.yaml
2017-03-15T13:33:34 INFO Manager dump: zone=example.com., sources=('route53',)
2017-03-15T13:33:36 INFO Route53Provider[route53] populate: found 64 records
2017-03-15T13:33:36 INFO YamlProvider[dump] plan: desired=example.com.
2017-03-15T13:33:36 INFO YamlProvider[dump] plan: Creates=64, Updates=0, Deletes=0, Existing Records=0
2017-03-15T13:33:36 INFO YamlProvider[dump] apply: making changes
```
The above command pulled the existing data out of Route53 and placed the results into `tmp/example.com.yaml`. That file can be inspected and moved into `config/` to become the new source. If things are working as designed a subsequent noop sync should show zero changes.
## Custom Sources and Providers
You can check out the [source](/octodns/source/) and [provider](/octodns/provider/) directory to see what's currently supported. Sources act as a source of record information. TinyDnsProvider is currently the only OSS source, though we have several others internally that are specific to our environment. These include something to pull host data from [gPanel](https://githubengineering.com/githubs-metal-cloud/) and a similar provider that sources information about our network gear to create both `A` & `PTR` records for their interfaces. Good candidates for OSS sources include an `ElbSource` that pulls information about [AWS Elastic Load Balancers](https://aws.amazon.com/elasticloadbalancing/) and dynamically creates `CNAME`s for them, or an `Ec2Source` that pulls instance information so that records can be created for hosts similar to how our `GPanelProvider` works. An `AxfrSource` could be really interesting as well. Another case where a source may make sense is if you'd like to export data from a legacy service that you have no plans to push changes back into.
Most of the things included in OctoDNS are providers, the obvious difference being that they can serve as both sources and targets of data. We'd really like to see this list grow over time so if you use an unsupported provider then PRs are welcome. The existing providers should serve as reasonable examples. Those that have no GeoDNS support are relatively straightforward. Unfortunately most of the APIs involved to do GeoDNS style traffic management are complex and somewhat inconsistent so adding support for that function would be nice, but is optional and best done in a separate pass.
The `class` key in the providers config section can be used to point to arbitrary classes in the python path so internal or 3rd party providers can easily be included with no coordination beyond getting them into PYTHONPATH, most likely installed into the virtualenv with OctoDNS. A rough sketch of a custom source follows below.
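As a hypothetical sketch of what a custom source can look like (the class and data below are made up, and it assumes `BaseSource` needs little more than an id and a `populate` hook, mirroring the providers included in this import):

```python
from logging import getLogger

from octodns.record import Record
from octodns.source.base import BaseSource


class MyHostsSource(BaseSource):
    '''Toy example: serves a fixed mapping of hostnames to IPv4 addresses.'''
    SUPPORTS_GEO = False

    def __init__(self, id, hosts, ttl=300):
        self.log = getLogger('MyHostsSource[{}]'.format(id))
        super(MyHostsSource, self).__init__(id)
        self.hosts = hosts  # e.g. {'www': '10.1.2.3'}
        self.ttl = ttl

    def populate(self, zone, target=False):
        # Add an A record to the zone for each host we know about
        for name, address in self.hosts.items():
            record = Record.new(zone, name, {
                'ttl': self.ttl,
                'type': 'A',
                'values': [address],
            }, source=self)
            zone.add_record(record)
```

Point a provider entry's `class` key at something like `mypackage.sources.MyHostsSource` and list it under a zone's `sources` to use it.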
## Other Uses
### Syncing between providers
While the primary use-case is to sync a set of yaml config files up to one or more DNS providers, OctoDNS has been built in such a way that you can easily source and target things arbitrarily. As a quick example the config below would sync `githubtest.net.` from Route53 to Dyn.
```yaml
---
providers:
  route53:
    class: octodns.provider.route53.Route53Provider
    access_key_id: env/AWS_ACCESS_KEY_ID
    secret_access_key: env/AWS_SECRET_ACCESS_KEY
  dyn:
    class: octodns.provider.dyn.DynProvider
    customer: env/DYN_CUSTOMER
    username: env/DYN_USERNAME
    password: env/DYN_PASSWORD
zones:
  githubtest.net.:
    sources:
      - route53
    targets:
      - dyn
```
### Dynamic sources
Internally we use custom sources to create records based on dynamic data that changes frequently without direct human intervention. An example of that might look something like the following. For hosts this mechanism is janitorial, run periodically, making sure the correct records exist as long as the host is alive and ensuring they are removed after the host is destroyed. The host provisioning and destruction processes do the actual work to create and destroy the records.
```yaml
---
providers:
  gpanel-site:
    class: github.octodns.source.gpanel.GPanelProvider
    host: 'gpanel.site.github.foo'
    token: env/GPANEL_SITE_TOKEN
  powerdns-site:
    class: octodns.provider.powerdns.PowerDnsProvider
    host: 'internal-dns.site.github.foo'
    api_key: env/POWERDNS_SITE_API_KEY
zones:
  hosts.site.github.foo.:
    sources:
      - gpanel-site
    targets:
      - powerdns-site
```
## Contributing
Please see our [contributing document](/.github/CONTRIBUTING.md) if you would like to participate!
## Getting help
If you have a problem or suggestion, please [open an issue](https://github.com/github/octodns/issues/new) in this repository, and we will do our best to help. Please note that this project adheres to the [Open Code of Conduct](http://todogroup.org/opencodeofconduct/#GitHub%20OctoDNS/opensource@github.com).
## License
OctoDNS is licensed under the [MIT license](LICENSE).
## Authors
OctoDNS was designed and authored by [Ross McFarland](https://github.com/ross) and [Joe Williams](https://github.com/joewilliams). It is now maintained, reviewed, and tested by Ross, Joe, and the rest of the Site Reliability Engineering team at GitHub.

BIN   docs/assets/deploy.png (1624 × 266, 80 KiB)

BIN   docs/assets/noop.png (1413 × 266, 74 KiB)

BIN   docs/assets/pr.png (1016 × 1211, 208 KiB)

+8 -0   octodns/__init__.py

@@ -0,0 +1,8 @@
'''
OctoDNS: DNS as code - Tools for managing DNS across multiple providers
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
__VERSION__ = '0.8.0'

+6 -0   octodns/cmds/__init__.py

@@ -0,0 +1,6 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals

+69 -0   octodns/cmds/args.py

@@ -0,0 +1,69 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from argparse import ArgumentParser as _Base
from logging import DEBUG, INFO, WARN, Formatter, StreamHandler, \
getLogger
from logging.handlers import SysLogHandler
from sys import stderr, stdout
class ArgumentParser(_Base):
'''
Manages argument parsing and adds some defaults and takes action on them.
Also manages logging setup.
'''
def __init__(self, *args, **kwargs):
super(ArgumentParser, self).__init__(*args, **kwargs)
def parse_args(self, default_log_level=INFO):
self.add_argument('--log-stream-stdout', action='store_true',
default=False,
help='Log to stdout instead of stderr')
_help = 'Send logging data to syslog in addition to stderr'
self.add_argument('--log-syslog', action='store_true', default=False,
help=_help)
self.add_argument('--syslog-device', default='/dev/log',
help='Syslog device')
self.add_argument('--syslog-facility', default='local0',
help='Syslog facility')
_help = 'Increase verbosity to get details and help track down issues'
self.add_argument('--debug', action='store_true', default=False,
help=_help)
args = super(ArgumentParser, self).parse_args()
self._setup_logging(args, default_log_level)
return args
def _setup_logging(self, args, default_log_level):
# TODO: if/when things are multi-threaded add [%(thread)d] in to the
# format
fmt = '%(asctime)s %(levelname)-5s %(name)s %(message)s'
formatter = Formatter(fmt=fmt, datefmt='%Y-%m-%dT%H:%M:%S ')
stream = stdout if args.log_stream_stdout else stderr
handler = StreamHandler(stream=stream)
handler.setFormatter(formatter)
logger = getLogger()
logger.addHandler(handler)
if args.log_syslog:
fmt = 'octodns[%(process)-5s:%(thread)d]: %(name)s ' \
'%(levelname)-5s %(message)s'
handler = SysLogHandler(address=args.syslog_device,
facility=args.syslog_facility)
handler.setFormatter(Formatter(fmt=fmt))
logger.addHandler(handler)
logger.level = DEBUG if args.debug else default_log_level
# boto is noisy, set it to warn
getLogger('botocore').level = WARN
# DynectSession is noisy too
getLogger('DynectSession').level = WARN

+29 -0   octodns/cmds/compare.py

@@ -0,0 +1,29 @@
#!/usr/bin/env python
'''
Octo-DNS Comparator
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from pprint import pprint
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
parser.add_argument('--a', nargs='+', required=True,
help='First source(s) to pull data from')
parser.add_argument('--b', nargs='+', required=True,
help='Second source(s) to pull data from')
parser.add_argument('--zone', default=None, required=True,
help='Zone to compare')
args = parser.parse_args()
manager = Manager(args.config_file)
changes = manager.compare(args.a, args.b, args.zone)
pprint(changes)

+25 -0   octodns/cmds/dump.py

@@ -0,0 +1,25 @@
#!/usr/bin/env python
'''
Octo-DNS Dumper
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
parser.add_argument('--output-dir', required=True,
help='The directory into which the results will be '
'written (Note: will overwrite existing files)')
parser.add_argument('zone', help='Zone to dump')
parser.add_argument('source', nargs='+', help='Source(s) to pull data from')
args = parser.parse_args()
manager = Manager(args.config_file)
manager.dump(args.zone, args.output_dir, *args.source)

+99 -0   octodns/cmds/report.py

@@ -0,0 +1,99 @@
#!/usr/bin/env python
'''
Octo-DNS Reporter
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from concurrent.futures import ThreadPoolExecutor
from dns.exception import Timeout
from dns.resolver import NXDOMAIN, NoAnswer, NoNameservers, Resolver, query
from logging import getLogger
from sys import stdout
import re
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
from octodns.zone import Zone
class AsyncResolver(Resolver):
def __init__(self, num_workers, *args, **kwargs):
super(AsyncResolver, self).__init__(*args, **kwargs)
self.executor = ThreadPoolExecutor(max_workers=num_workers)
def query(self, *args, **kwargs):
return self.executor.submit(super(AsyncResolver, self).query, *args,
**kwargs)
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
parser.add_argument('--zone', required=True, help='Zone to dump')
parser.add_argument('--source', required=True, default=[], action='append',
help='Source(s) to pull data from')
parser.add_argument('--num-workers', default=4,
help='Number of background workers')
parser.add_argument('--timeout', default=1,
help='Number of seconds to wait for an answer')
parser.add_argument('server', nargs='+', help='Servers to query')
args = parser.parse_args()
manager = Manager(args.config_file)
log = getLogger('report')
try:
sources = [manager.providers[source] for source in args.source]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
zone = Zone(args.zone, manager.configured_sub_zones(args.zone))
for source in sources:
source.populate(zone)
print('name,type,ttl,{},consistent'.format(','.join(args.server)))
resolvers = []
ip_addr_re = re.compile(r'^[\d\.]+$')
for server in args.server:
resolver = AsyncResolver(configure=False,
num_workers=int(args.num_workers))
if not ip_addr_re.match(server):
server = str(query(server, 'A')[0])
log.info('server=%s', server)
resolver.nameservers = [server]
resolver.lifetime = int(args.timeout)
resolvers.append(resolver)
queries = {}
for record in sorted(zone.records):
queries[record] = [r.query(record.fqdn, record._type)
for r in resolvers]
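# Each CSV row written below is: fqdn, type, ttl, the answer(s) from each
# server, and finally True/False depending on whether all servers agreed.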
for record, futures in sorted(queries.items(), key=lambda d: d[0]):
stdout.write(record.fqdn)
stdout.write(',')
stdout.write(record._type)
stdout.write(',')
stdout.write(str(record.ttl))
compare = {}
for future in futures:
stdout.write(',')
try:
answers = [str(r) for r in future.result()]
except (NoAnswer, NoNameservers):
answers = ['*no answer*']
except NXDOMAIN:
answers = ['*does not exist*']
except Timeout:
answers = ['*timeout*']
stdout.write(' '.join(answers))
# sorting to ignore order
answers = '*:*'.join(sorted(answers)).lower()
compare[answers] = True
stdout.write(',True\n' if len(compare) == 1 else ',False\n')

+37 -0   octodns/cmds/sync.py

@@ -0,0 +1,37 @@
#!/usr/bin/env python
'''
Octo-DNS Multiplexer
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', required=True,
help='The Manager configuration file to use')
parser.add_argument('--doit', action='store_true', default=False,
help='Whether to take action or just show what would '
'change')
parser.add_argument('--force', action='store_true', default=False,
help='Acknowledge that significant changes are being made '
'and do them')
parser.add_argument('zone', nargs='*', default=[],
help='Limit sync to the specified zone(s)')
# --sources isn't an option here b/c filtering sources out would be super
# dangerous since you could easily end up with an empty zone and delete
# everything, or even just part of things when there are multiple sources
parser.add_argument('--target', default=[], action='append',
help='Limit sync to the specified target(s)')
args = parser.parse_args()
manager = Manager(args.config_file)
manager.sync(eligible_zones=args.zone, eligible_targets=args.target,
dry_run=not args.doit, force=args.force)

+22 -0   octodns/cmds/validate.py

@@ -0,0 +1,22 @@
#!/usr/bin/env python
'''
Octo-DNS Validator
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import WARN
from octodns.cmds.args import ArgumentParser
from octodns.manager import Manager
parser = ArgumentParser(description=__doc__.split('\n')[1])
parser.add_argument('--config-file', default='./config/production.yaml',
help='The Manager configuration file to use')
args = parser.parse_args(WARN)
manager = Manager(args.config_file)
manager.validate_configs()

+309 -0   octodns/manager.py

@@ -0,0 +1,309 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from StringIO import StringIO
from importlib import import_module
from os import environ
import logging
from .provider.base import BaseProvider
from .provider.yaml import YamlProvider
from .yaml import safe_load
from .zone import Zone
class _AggregateTarget(object):
id = 'aggregate'
def __init__(self, targets):
self.targets = targets
def supports(self, record):
for target in self.targets:
if not target.supports(record):
return False
return True
@property
def SUPPORTS_GEO(self):
for target in self.targets:
if not target.SUPPORTS_GEO:
return False
return True
class Manager(object):
log = logging.getLogger('Manager')
def __init__(self, config_file):
self.log.info('__init__: config_file=%s', config_file)
# Read our config file
with open(config_file, 'r') as fh:
self.config = safe_load(fh, enforce_order=False)
self.log.debug('__init__: configuring providers')
self.providers = {}
for provider_name, provider_config in self.config['providers'].items():
# Get our class and remove it from the provider_config
try:
_class = provider_config.pop('class')
except KeyError:
raise Exception('Provider {} is missing class'
.format(provider_name))
_class = self._get_provider_class(_class)
# Build up the arguments we need to pass to the provider
kwargs = {}
for k, v in provider_config.items():
try:
if v.startswith('env/'):
try:
env_var = v[4:]
v = environ[env_var]
except KeyError:
raise Exception('Incorrect provider config, '
'missing env var {}'
.format(env_var))
except AttributeError:
pass
kwargs[k] = v
try:
self.providers[provider_name] = _class(provider_name, **kwargs)
except TypeError:
raise Exception('Incorrect provider config for {}'
.format(provider_name))
zone_tree = {}
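# The tree is keyed by reversed zone labels, e.g. configured zones of
# 'example.com.' and 'other.example.com.' produce
#   {'com': {'example': {'other': {}}}}
# which is what configured_sub_zones() below walks to find a zone's children.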
# sort by reversed strings so that parent zones always come first
for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]):
# ignore trailing dots, and reverse
pieces = name[:-1].split('.')[::-1]
# where starts out at the top
where = zone_tree
# for all the pieces
for piece in pieces:
try:
where = where[piece]
# our current piece already exists, just point where at
# its value
except KeyError:
# our current piece doesn't exist, create it
where[piece] = {}
# and then point where at its newly created value
where = where[piece]
self.zone_tree = zone_tree
def _get_provider_class(self, _class):
try:
module_name, class_name = _class.rsplit('.', 1)
module = import_module(module_name)
except (ImportError, ValueError):
self.log.error('_get_provider_class: Unable to import module %s',
_class)
raise Exception('Unknown provider class: {}'.format(_class))
try:
return getattr(module, class_name)
except AttributeError:
self.log.error('_get_provider_class: Unable to get class %s from '
'module %s', class_name, module)
raise Exception('Unknown provider class: {}'.format(_class))
def configured_sub_zones(self, zone_name):
# Reversed pieces of the zone name
pieces = zone_name[:-1].split('.')[::-1]
# Point where at the root of the tree
where = self.zone_tree
# Until we've hit the bottom of this zone
try:
while pieces:
# Point where at the value of our current piece
where = where[pieces.pop(0)]
except KeyError:
self.log.debug('configured_sub_zones: unknown zone, %s, no subs',
zone_name)
return set()
# We're now pointed at the dict for our name, the keys of which will be
# any subzones
sub_zone_names = where.keys()
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def sync(self, eligible_zones=[], eligible_targets=[], dry_run=True,
force=False):
self.log.info('sync: eligible_zones=%s, eligible_targets=%s, '
'dry_run=%s, force=%s', eligible_zones, eligible_targets,
dry_run, force)
zones = self.config['zones'].items()
if eligible_zones:
zones = filter(lambda d: d[0] in eligible_zones, zones)
plans = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
try:
sources = config['sources']
except KeyError:
raise Exception('Zone {} is missing sources'.format(zone_name))
try:
targets = config['targets']
except KeyError:
raise Exception('Zone {} is missing targets'.format(zone_name))
if eligible_targets:
targets = filter(lambda d: d in eligible_targets, targets)
self.log.info('sync: sources=%s -> targets=%s', sources, targets)
try:
sources = [self.providers[source] for source in sources]
except KeyError:
raise Exception('Zone {}, unknown source: {}'.format(zone_name,
source))
try:
trgs = []
for target in targets:
trg = self.providers[target]
if not isinstance(trg, BaseProvider):
raise Exception('{} - "{}" does not support targeting'
.format(trg, target))
trgs.append(trg)
targets = trgs
except KeyError:
raise Exception('Zone {}, unknown target: {}'.format(zone_name,
target))
self.log.debug('sync: populating')
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
for source in sources:
source.populate(zone)
self.log.debug('sync: planning')
for target in targets:
plan = target.plan(zone)
if plan:
plans.append((target, plan))
hr = '*************************************************************' \
'*******************\n'
buf = StringIO()
buf.write('\n')
if plans:
current_zone = None
for target, plan in plans:
if plan.desired.name != current_zone:
current_zone = plan.desired.name
buf.write(hr)
buf.write('* ')
buf.write(current_zone)
buf.write('\n')
buf.write(hr)
buf.write('* ')
buf.write(target.id)
buf.write(' (')
buf.write(target)
buf.write(')\n* ')
for change in plan.changes:
buf.write(change.__repr__(leader='* '))
buf.write('\n* ')
buf.write('Summary: ')
buf.write(plan)
buf.write('\n')
else:
buf.write(hr)
buf.write('No changes were planned\n')
buf.write(hr)
buf.write('\n')
self.log.info(buf.getvalue())
if not force:
self.log.debug('sync: checking safety')
for target, plan in plans:
plan.raise_if_unsafe()
if dry_run or config.get('always-dry-run', False):
return 0
total_changes = 0
self.log.debug('sync: applying')
for target, plan in plans:
total_changes += target.apply(plan)
self.log.info('sync: %d total changes', total_changes)
return total_changes
def compare(self, a, b, zone):
'''
Compare zone data between 2 sources.
Note: only things supported by both sources will be considered
'''
self.log.info('compare: a=%s, b=%s, zone=%s', a, b, zone)
try:
a = [self.providers[source] for source in a]
b = [self.providers[source] for source in b]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
sub_zones = self.configured_sub_zones(zone)
za = Zone(zone, sub_zones)
for source in a:
source.populate(za)
zb = Zone(zone, sub_zones)
for source in b:
source.populate(zb)
return zb.changes(za, _AggregateTarget(a + b))
def dump(self, zone, output_dir, source, *sources):
'''
Dump zone data from the specified source
'''
self.log.info('dump: zone=%s, sources=%s', zone, sources)
# We broke out source to force at least one to be passed, add it to any
# others we got.
sources = [source] + list(sources)
try:
sources = [self.providers[s] for s in sources]
except KeyError as e:
raise Exception('Unknown source: {}'.format(e.args[0]))
target = YamlProvider('dump', output_dir)
zone = Zone(zone, self.configured_sub_zones(zone))
for source in sources:
source.populate(zone)
plan = target.plan(zone)
target.apply(plan)
def validate_configs(self):
for zone_name, config in self.config['zones'].items():
zone = Zone(zone_name, self.configured_sub_zones(zone_name))
try:
sources = config['sources']
except KeyError:
raise Exception('Zone {} is missing sources'.format(zone_name))
try:
sources = [self.providers[source] for source in sources]
except KeyError:
raise Exception('Zone {}, unknown source: {}'.format(zone_name,
source))
for source in sources:
if isinstance(source, YamlProvider):
source.populate(zone)

+6 -0   octodns/provider/__init__.py

@@ -0,0 +1,6 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals

+116 -0   octodns/provider/base.py

@@ -0,0 +1,116 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ..source.base import BaseSource
from ..zone import Zone
class UnsafePlan(Exception):
pass
class Plan(object):
MAX_SAFE_UPDATES = 4
MAX_SAFE_DELETES = 4
def __init__(self, existing, desired, changes):
self.existing = existing
self.desired = desired
self.changes = changes
change_counts = {
'Create': 0,
'Delete': 0,
'Update': 0
}
for change in changes:
change_counts[change.__class__.__name__] += 1
self.change_counts = change_counts
def raise_if_unsafe(self):
# TODO: what is safe really?
if self.change_counts['Update'] > self.MAX_SAFE_UPDATES:
raise UnsafePlan('Too many updates')
if self.change_counts['Delete'] > self.MAX_SAFE_DELETES:
raise UnsafePlan('Too many deletes')
def __repr__(self):
return 'Creates={}, Updates={}, Deletes={}, Existing Records={}' \
.format(self.change_counts['Create'], self.change_counts['Update'],
self.change_counts['Delete'],
len(self.existing.records))
class BaseProvider(BaseSource):
def __init__(self, id, apply_disabled=False):
super(BaseProvider, self).__init__(id)
self.log.debug('__init__: id=%s, apply_disabled=%s', id,
apply_disabled)
self.apply_disabled = apply_disabled
def _include_change(self, change):
'''
An opportunity for providers to filter out false positives due to
peculiarities in their implementation. E.g. minimum TTLs.
'''
return True
def _extra_changes(self, existing, changes):
'''
An opportunity for providers to add extra changes to the plan that are
necessary to update ancillary record data or configure the zone. E.g.
base NS records.
'''
return []
def plan(self, desired):
self.log.info('plan: desired=%s', desired.name)
existing = Zone(desired.name, desired.sub_zones)
self.populate(existing, target=True)
# compute the changes at the zone/record level
changes = existing.changes(desired, self)
# allow the provider to filter out false positives
before = len(changes)
changes = filter(self._include_change, changes)
after = len(changes)
if before != after:
self.log.info('plan: filtered out %s changes', before - after)
# allow the provider to add extra changes it needs
extra = self._extra_changes(existing, changes)
if extra:
self.log.info('plan: extra changes\n %s', '\n '
.join([str(c) for c in extra]))
changes += extra
if changes:
plan = Plan(existing, desired, changes)
self.log.info('plan: %s', plan)
return plan
self.log.info('plan: No changes')
return None
def apply(self, plan):
'''
Submits actual planned changes to the provider. Returns the number of
changes made
'''
if self.apply_disabled:
self.log.info('apply: disabled')
return 0
self.log.info('apply: making changes')
self._apply(plan)
return len(plan.changes)
def _apply(self, plan):
raise NotImplementedError('Abstract base class, _apply method '
'missing')

+249 -0   octodns/provider/cloudflare.py

@@ -0,0 +1,249 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from logging import getLogger
from requests import Session
from ..record import Record, Update
from .base import BaseProvider
class CloudflareAuthenticationError(Exception):
def __init__(self, data):
try:
message = data['errors'][0]['message']
except (IndexError, KeyError):
message = 'Authentication error'
super(CloudflareAuthenticationError, self).__init__(message)
class CloudflareProvider(BaseProvider):
SUPPORTS_GEO = False
# TODO: support SRV
UNSUPPORTED_TYPES = ('NAPTR', 'PTR', 'SOA', 'SRV', 'SSHFP')
MIN_TTL = 120
TIMEOUT = 15
def __init__(self, id, email, token, *args, **kwargs):
self.log = getLogger('CloudflareProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, email=%s, token=***', id, email)
super(CloudflareProvider, self).__init__(id, *args, **kwargs)
sess = Session()
sess.headers.update({
'X-Auth-Email': email,
'X-Auth-Key': token,
})
self._sess = sess
self._zones = None
self._zone_records = {}
def supports(self, record):
return record._type not in self.UNSUPPORTED_TYPES
def _request(self, method, path, params=None, data=None):
self.log.debug('_request: method=%s, path=%s', method, path)
url = 'https://api.cloudflare.com/client/v4{}'.format(path)
resp = self._sess.request(method, url, params=params, json=data,
timeout=self.TIMEOUT)
self.log.debug('_request: status=%d', resp.status_code)
if resp.status_code == 403:
raise CloudflareAuthenticationError(resp.json())
resp.raise_for_status()
return resp.json()
@property
def zones(self):
if self._zones is None:
page = 1
zones = []
while page:
resp = self._request('GET', '/zones', params={'page': page})
zones += resp['result']
info = resp['result_info']
if info['count'] > 0 and info['count'] == info['per_page']:
page += 1
else:
page = None
self._zones = {'{}.'.format(z['name']): z['id'] for z in zones}
return self._zones
def _data_for_multiple(self, _type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [r['content'] for r in records],
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_TXT = _data_for_multiple
def _data_for_CNAME(self, _type, records):
only = records[0]
return {
'ttl': only['ttl'],
'type': _type,
'value': '{}.'.format(only['content'])
}
def _data_for_MX(self, _type, records):
values = []
for r in records:
values.append({
'priority': r['priority'],
'value': '{}.'.format(r['content']),
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
def _data_for_NS(self, _type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': ['{}.'.format(r['content']) for r in records],
}
def zone_records(self, zone):
if zone.name not in self._zone_records:
zone_id = self.zones.get(zone.name, False)
if not zone_id:
return []
records = []
path = '/zones/{}/dns_records'.format(zone_id)
page = 1
while page:
resp = self._request('GET', path, params={'page': page})
records += resp['result']
info = resp['result_info']
if info['count'] > 0 and info['count'] == info['per_page']:
page += 1
else:
page = None
self._zone_records[zone.name] = records
return self._zone_records[zone.name]
def populate(self, zone, target=False):
self.log.debug('populate: name=%s', zone.name)
before = len(zone.records)
records = self.zone_records(zone)
if records:
values = defaultdict(lambda: defaultdict(list))
for record in records:
name = zone.hostname_from_fqdn(record['name'])
_type = record['type']
if _type not in self.UNSUPPORTED_TYPES:
values[name][record['type']].append(record)
for name, types in values.items():
for _type, records in types.items():
data_for = getattr(self, '_data_for_{}'.format(_type))
data = data_for(_type, records)
record = Record.new(zone, name, data, source=self)
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _include_change(self, change):
if isinstance(change, Update):
existing = change.existing.data
new = change.new.data
new['ttl'] = max(120, new['ttl'])
if new == existing:
return False
return True
def _contents_for_multiple(self, record):
for value in record.values:
yield {'content': value}
_contents_for_A = _contents_for_multiple
_contents_for_AAAA = _contents_for_multiple
_contents_for_NS = _contents_for_multiple
_contents_for_SPF = _contents_for_multiple
_contents_for_TXT = _contents_for_multiple
def _contents_for_CNAME(self, record):
yield {'content': record.value}
def _contents_for_MX(self, record):
for value in record.values:
yield {
'priority': value.priority,
'content': value.value
}
def _apply_Create(self, change):
new = change.new
zone_id = self.zones[new.zone.name]
contents_for = getattr(self, '_contents_for_{}'.format(new._type))
path = '/zones/{}/dns_records'.format(zone_id)
name = new.fqdn[:-1]
for content in contents_for(change.new):
content.update({
'name': name,
'type': new._type,
# Cloudflare has a min ttl of 120s
'ttl': max(self.MIN_TTL, new.ttl),
})
self._request('POST', path, data=content)
def _apply_Update(self, change):
# Create the new and delete the old
self._apply_Create(change)
self._apply_Delete(change)
def _apply_Delete(self, change):
existing = change.existing
existing_name = existing.fqdn[:-1]
for record in self.zone_records(existing.zone):
if existing_name == record['name'] and \
existing._type == record['type']:
path = '/zones/{}/dns_records/{}'.format(record['zone_id'],
record['id'])
self._request('DELETE', path)
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
name = desired.name
if name not in self.zones:
self.log.debug('_apply: no matching zone, creating')
data = {
'name': name[:-1],
'jump_start': False,
}
resp = self._request('POST', '/zones', data=data)
zone_id = resp['result']['id']
self.zones[name] = zone_id
self._zone_records[name] = {}
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
# clear the cache
self._zone_records.pop(name, None)

+349 -0   octodns/provider/dnsimple.py

@@ -0,0 +1,349 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from requests import Session
import logging
from ..record import Record
from .base import BaseProvider
class DnsimpleClientException(Exception):
pass
class DnsimpleClientNotFound(DnsimpleClientException):
def __init__(self):
super(DnsimpleClientNotFound, self).__init__('Not found')
class DnsimpleClientUnauthorized(DnsimpleClientException):
def __init__(self):
super(DnsimpleClientUnauthorized, self).__init__('Unauthorized')
class DnsimpleClient(object):
BASE = 'https://api.dnsimple.com/v2/'
def __init__(self, token, account):
self.account = account
sess = Session()
sess.headers.update({'Authorization': 'Bearer {}'.format(token)})
self._sess = sess
def _request(self, method, path, params=None, data=None):
url = '{}{}{}'.format(self.BASE, self.account, path)
resp = self._sess.request(method, url, params=params, json=data)
if resp.status_code == 401:
raise DnsimpleClientUnauthorized()
if resp.status_code == 404:
raise DnsimpleClientNotFound()
resp.raise_for_status()
return resp
def domain(self, name):
path = '/domains/{}'.format(name)
return self._request('GET', path).json()
def domain_create(self, name):
return self._request('POST', '/domains', data={'name': name})
def records(self, zone_name):
ret = []
page = 1
while True:
data = self._request('GET', '/zones/{}/records'.format(zone_name),
{'page': page}).json()
ret += data['data']
pagination = data['pagination']
if page >= pagination['total_pages']:
break
page += 1
return ret
def record_create(self, zone_name, params):
path = '/zones/{}/records'.format(zone_name)
self._request('POST', path, data=params)
def record_delete(self, zone_name, record_id):
path = '/zones/{}/records/{}'.format(zone_name, record_id)
self._request('DELETE', path)
class DnsimpleProvider(BaseProvider):
SUPPORTS_GEO = False
def __init__(self, id, token, account, *args, **kwargs):
self.log = logging.getLogger('DnsimpleProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, token=***, account=%s', id, account)
super(DnsimpleProvider, self).__init__(id, *args, **kwargs)
self._client = DnsimpleClient(token, account)
self._zone_records = {}
def _data_for_multiple(self, _type, records):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [r['content'] for r in records]
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_TXT = _data_for_multiple
def _data_for_CNAME(self, _type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': '{}.'.format(record['content'])
}
def _data_for_MX(self, _type, records):
values = []
for record in records:
values.append({
'priority': record['priority'],
'value': '{}.'.format(record['content'])
})
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values
}
def _data_for_NAPTR(self, _type, records):
values = []
for record in records:
try:
order, preference, flags, service, regexp, replacement = \
record['content'].split(' ', 5)
except ValueError:
# their api will let you create invalid records, this
# essentially handles that by ignoring them for values
# purposes. That will cause updates to happen to delete them if
# they shouldn't exist or update them if they're wrong
continue
values.append({
'flags': flags[1:-1],
'order': order,
'preference': preference,
'regexp': regexp[1:-1],
'replacement': replacement,
'service': service[1:-1],
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
def _data_for_NS(self, _type, records):
values = []
for record in records:
content = record['content']
if content[-1] != '.':
content = '{}.'.format(content)
values.append(content)
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': values,
}
def _data_for_PTR(self, _type, records):
record = records[0]
return {
'ttl': record['ttl'],
'type': _type,
'value': record['content']
}
def _data_for_SRV(self, _type, records):
values = []
for record in records:
try:
weight, port, target = record['content'].split(' ', 2)
except ValueError:
# see _data_for_NAPTR's continue
continue
values.append({
'port': port,
'priority': record['priority'],
'target': '{}.'.format(target),
'weight': weight
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
def _data_for_SSHFP(self, _type, records):
values = []
for record in records:
try:
algorithm, fingerprint_type, fingerprint = \
record['content'].split(' ', 2)
except ValueError:
# see _data_for_NAPTR's continue
continue
values.append({
'algorithm': algorithm,
'fingerprint': fingerprint,
'fingerprint_type': fingerprint_type
})
return {
'type': _type,
'ttl': records[0]['ttl'],
'values': values
}
def zone_records(self, zone):
if zone.name not in self._zone_records:
try:
self._zone_records[zone.name] = \
self._client.records(zone.name[:-1])
except DnsimpleClientNotFound:
return []
return self._zone_records[zone.name]
def populate(self, zone, target=False):
self.log.debug('populate: name=%s', zone.name)
values = defaultdict(lambda: defaultdict(list))
for record in self.zone_records(zone):
_type = record['type']
if _type == 'SOA':
continue
values[record['name']][record['type']].append(record)
before = len(zone.records)
for name, types in values.items():
for _type, records in types.items():
data_for = getattr(self, '_data_for_{}'.format(_type))
record = Record.new(zone, name, data_for(_type, records))
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _params_for_multiple(self, record):
for value in record.values:
yield {
'content': value,
'name': record.name,
'ttl': record.ttl,
'type': record._type,
}
_params_for_A = _params_for_multiple
_params_for_AAAA = _params_for_multiple
_params_for_NS = _params_for_multiple
_params_for_SPF = _params_for_multiple
_params_for_TXT = _params_for_multiple
def _params_for_single(self, record):
yield {
'content': record.value,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
_params_for_CNAME = _params_for_single
_params_for_PTR = _params_for_single
def _params_for_MX(self, record):
for value in record.values:
yield {
'content': value.value,
'name': record.name,
'priority': value.priority,
'ttl': record.ttl,
'type': record._type
}
def _params_for_NAPTR(self, record):
for value in record.values:
content = '{} {} "{}" "{}" "{}" {}' \
.format(value.order, value.preference, value.flags,
value.service, value.regexp, value.replacement)
yield {
'content': content,
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
def _params_for_SRV(self, record):
for value in record.values:
yield {
'content': '{} {} {}'.format(value.weight, value.port,
value.target),
'name': record.name,
'priority': value.priority,
'ttl': record.ttl,
'type': record._type
}
def _params_for_SSHFP(self, record):
for value in record.values:
yield {
'content': '{} {} {}'.format(value.algorithm,
value.fingerprint_type,
value.fingerprint),
'name': record.name,
'ttl': record.ttl,
'type': record._type
}
def _apply_Create(self, change):
new = change.new
params_for = getattr(self, '_params_for_{}'.format(new._type))
for params in params_for(new):
self._client.record_create(new.zone.name[:-1], params)
def _apply_Update(self, change):
self._apply_Create(change)
self._apply_Delete(change)
def _apply_Delete(self, change):
existing = change.existing
zone = existing.zone
for record in self.zone_records(zone):
if existing.name == record['name'] and \
existing._type == record['type']:
self._client.record_delete(zone.name[:-1], record['id'])
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
domain_name = desired.name[:-1]
try:
self._client.domain(domain_name)
except DnsimpleClientNotFound:
self.log.debug('_apply: no matching zone, creating domain')
self._client.domain_create(domain_name)
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
# Clear out the cache if any
self._zone_records.pop(desired.name, None)

+651 -0   octodns/provider/dyn.py

@@ -0,0 +1,651 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from dyn.tm.errors import DynectGetError
from dyn.tm.services.dsf import DSFARecord, DSFAAAARecord, DSFFailoverChain, \
DSFMonitor, DSFNode, DSFRecordSet, DSFResponsePool, DSFRuleset, \
TrafficDirector, get_all_dsf_monitors, get_all_dsf_services, \
get_response_pool
from dyn.tm.session import DynectSession
from dyn.tm.zones import Zone as DynZone
from logging import getLogger
from uuid import uuid4
from ..record import Record
from .base import BaseProvider
class _CachingDynZone(DynZone):
log = getLogger('_CachingDynZone')
_cache = {}
@classmethod
def get(cls, zone_name, create=False):
cls.log.debug('get: zone_name=%s, create=%s', zone_name, create)
# This works in dyn zone names, without the trailing .
try:
dyn_zone = cls._cache[zone_name]
cls.log.debug('get: cache hit')
except KeyError:
cls.log.debug('get: cache miss')
try:
dyn_zone = _CachingDynZone(zone_name)
cls.log.debug('get: fetched')
except DynectGetError:
if not create:
cls.log.debug("get: doesn't exist")
return None
# this value shouldn't really matter, it's not tied to
# whois or anything
hostname = 'hostmaster@{}'.format(zone_name[:-1])
# Try again with the params necessary to create
dyn_zone = _CachingDynZone(zone_name, ttl=3600,
contact=hostname,
serial_style='increment')
cls.log.debug('get: created')
cls._cache[zone_name] = dyn_zone
return dyn_zone
@classmethod
def flush_zone(cls, zone_name):
'''Flushes the zone cache, if there is one'''
cls.log.debug('flush_zone: zone_name=%s', zone_name)
try:
del cls._cache[zone_name]
except KeyError:
pass
def __init__(self, zone_name, *args, **kwargs):
super(_CachingDynZone, self).__init__(zone_name, *args, **kwargs)
self.flush_cache()
def flush_cache(self):
self._cached_records = None
def get_all_records(self):
if self._cached_records is None:
self._cached_records = \
super(_CachingDynZone, self).get_all_records()
return self._cached_records
def publish(self):
super(_CachingDynZone, self).publish()
self.flush_cache()
class DynProvider(BaseProvider):
RECORDS_TO_TYPE = {
'a_records': 'A',
'aaaa_records': 'AAAA',
'cname_records': 'CNAME',
'mx_records': 'MX',
'naptr_records': 'NAPTR',
'ns_records': 'NS',
'ptr_records': 'PTR',
'sshfp_records': 'SSHFP',
'spf_records': 'SPF',
'srv_records': 'SRV',
'txt_records': 'TXT',
}
TYPE_TO_RECORDS = {
'A': 'a_records',
'AAAA': 'aaaa_records',
'CNAME': 'cname_records',
'MX': 'mx_records',
'NAPTR': 'naptr_records',
'NS': 'ns_records',
'PTR': 'ptr_records',
'SSHFP': 'sshfp_records',
'SPF': 'spf_records',
'SRV': 'srv_records',
'TXT': 'txt_records',
}
# https://help.dyn.com/predefined-geotm-regions-groups/
REGION_CODES = {
'NA': 11, # Continental North America
'SA': 12, # Continental South America
'EU': 13, # Continental Europe
'AF': 14, # Continental Africa
'AS': 15, # Continental Asia
'OC': 16, # Continental Australia/Oceania
'AN': 17, # Continental Antarctica
}
# Going to be lazy loaded b/c it makes a (slow) request, global
_dyn_sess = None
def __init__(self, id, customer, username, password,
traffic_directors_enabled=False, *args, **kwargs):
self.log = getLogger('DynProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, customer=%s, username=%s, '
'password=***, traffic_directors_enabled=%s', id,
customer, username, traffic_directors_enabled)
# we have to set this before calling super b/c SUPPORTS_GEO requires it
self.traffic_directors_enabled = traffic_directors_enabled
super(DynProvider, self).__init__(id, *args, **kwargs)
self.customer = customer
self.username = username
self.password = password
self._cache = {}
self._traffic_directors = None
self._traffic_director_monitors = None
@property
def SUPPORTS_GEO(self):
return self.traffic_directors_enabled
def _check_dyn_sess(self):
if self._dyn_sess:
self.log.debug('_check_dyn_sess: exists')
return
self.log.debug('_check_dyn_sess: creating')
# Dynect's client is ugly, you create a session object, but then don't
# use it for anything. It just makes the other objects work behind the
# scenes. :-( That probably means we can only support a single set of
# dynect creds
self._dyn_sess = DynectSession(self.customer, self.username,
self.password)
def _data_for_A(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [r.address for r in records]
}
_data_for_AAAA = _data_for_A
def _data_for_CNAME(self, _type, records):
record = records[0]
return {
'type': _type,
'ttl': record.ttl,
'value': record.cname,
}
def _data_for_MX(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [{'priority': r.preference, 'value': r.exchange}
for r in records],
}
def _data_for_NAPTR(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [{
'order': r.order,
'preference': r.preference,
'flags': r.flags,
'service': r.services,
'regexp': r.regexp,
'replacement': r.replacement,
} for r in records]
}
def _data_for_NS(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [r.nsdname for r in records]
}
def _data_for_PTR(self, _type, records):
record = records[0]
return {
'type': _type,
'ttl': record.ttl,
'value': record.ptrdname,
}
def _data_for_SPF(self, _type, records):
record = records[0]
return {
'type': _type,
'ttl': record.ttl,
'values': [r.txtdata for r in records]
}
_data_for_TXT = _data_for_SPF
def _data_for_SSHFP(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [{
'algorithm': r.algorithm,
'fingerprint_type': r.fptype,
'fingerprint': r.fingerprint,
} for r in records],
}
def _data_for_SRV(self, _type, records):
return {
'type': _type,
'ttl': records[0].ttl,
'values': [{
'priority': r.priority,
'weight': r.weight,
'port': r.port,
'target': r.target,
} for r in records],
}
@property
def traffic_directors(self):
if self._traffic_directors is None:
self._check_dyn_sess()
tds = defaultdict(dict)
for td in get_all_dsf_services():
try:
fqdn, _type = td.label.split(':', 1)
except ValueError:
continue
tds[fqdn][_type] = td
self._traffic_directors = dict(tds)
return self._traffic_directors
def _populate_traffic_directors(self, zone):
self.log.debug('_populate_traffic_directors: zone=%s', zone.name)
td_records = set()
for fqdn, types in self.traffic_directors.items():
# TODO: skip subzones
if not fqdn.endswith(zone.name):
continue
for _type, td in types.items():
# critical to call rulesets once, each call loads them :-(
rulesets = td.rulesets
# We start out with something that will always show a change in
# case this is a busted TD. This will prevent us from creating a
# duplicate td. We'll overwrite this with real data provided we
# have it
geo = {}
data = {
'geo': geo,
'type': _type,
'ttl': td.ttl,
'values': ['0.0.0.0']
}
for ruleset in rulesets:
try:
record_set = ruleset.response_pools[0].rs_chains[0] \
.record_sets[0]
except IndexError:
# problems indicate a malformed ruleset, ignore it
continue
_type = record_set.rdata_class
if ruleset.label.startswith('default:'):
data_for = getattr(self, '_data_for_{}'.format(_type))
data.update(data_for(_type, record_set.records))
else:
# We've stored the geo in label
try:
code, _ = ruleset.label.split(':', 1)
except ValueError:
continue
values = [r.address for r in record_set.records]
geo[code] = values
name = zone.hostname_from_fqdn(fqdn)
record = Record.new(zone, name, data, source=self)
zone.add_record(record)
td_records.add(record)
return td_records
def populate(self, zone, target=False):
self.log.info('populate: zone=%s', zone.name)
before = len(zone.records)
self._check_dyn_sess()
td_records = set()
if self.traffic_directors_enabled:
td_records = self._populate_traffic_directors(zone)
dyn_zone = _CachingDynZone.get(zone.name[:-1])
if dyn_zone:
values = defaultdict(lambda: defaultdict(list))
for _type, records in dyn_zone.get_all_records().items():
if _type == 'soa_records':
continue
_type = self.RECORDS_TO_TYPE[_type]
for record in records:
record_name = zone.hostname_from_fqdn(record.fqdn)
values[record_name][_type].append(record)
for name, types in values.items():
for _type, records in types.items():
data_for = getattr(self, '_data_for_{}'.format(_type))
data = data_for(_type, records)
record = Record.new(zone, name, data, source=self)
if record not in td_records:
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _kwargs_for_A(self, record):
return [{
'address': v,
'ttl': record.ttl,
} for v in record.values]
_kwargs_for_AAAA = _kwargs_for_A
def _kwargs_for_CNAME(self, record):
return [{
'cname': record.value,
'ttl': record.ttl,
}]
def _kwargs_for_MX(self, record):
return [{
'preference': v.priority,
'exchange': v.value,
'ttl': record.ttl,
} for v in record.values]
def _kwargs_for_NAPTR(self, record):
return [{
'flags': v.flags,
'order': v.order,
'preference': v.preference,
'regexp': v.regexp,
'replacement': v.replacement,
'services': v.service,
'ttl': record.ttl,
} for v in record.values]
def _kwargs_for_NS(self, record):
return [{
'nsdname': v,
'ttl': record.ttl,
} for v in record.values]
def _kwargs_for_PTR(self, record):
return [{
'ptrdname': record.value,
'ttl': record.ttl,
}]
def _kwargs_for_SSHFP(self, record):
return [{
'algorithm': v.algorithm,
'fptype': v.fingerprint_type,
'fingerprint': v.fingerprint,
} for v in record.values]
def _kwargs_for_SPF(self, record):
return [{
'txtdata': v,
'ttl': record.ttl,
} for v in record.values]
def _kwargs_for_SRV(self, record):
return [{
'port': v.port,
'priority': v.priority,
'target': v.target,
'weight': v.weight,
'ttl': record.ttl,
} for v in record.values]
_kwargs_for_TXT = _kwargs_for_SPF
def _traffic_director_monitor(self, fqdn):
if self._traffic_director_monitors is None:
self._traffic_director_monitors = \
{m.label: m for m in get_all_dsf_monitors()}
try:
return self._traffic_director_monitors[fqdn]
except KeyError:
monitor = DSFMonitor(fqdn, protocol='HTTPS', response_count=2,
probe_interval=60, retries=2, port=443,
active='Y', host=fqdn[:-1], timeout=10,
path='/_dns')
self._traffic_director_monitors[fqdn] = monitor
return monitor
def _find_or_create_pool(self, td, pools, label, _type, values,
monitor_id=None):
for pool in pools:
if pool.label != label:
continue
records = pool.rs_chains[0].record_sets[0].records
record_values = sorted([r.address for r in records])
if record_values == values:
# it's a match
return pool
# we need to create the pool
_class = {
'A': DSFARecord,
'AAAA': DSFAAAARecord
}[_type]
records = [_class(v) for v in values]
record_set = DSFRecordSet(_type, label, serve_count=len(records),
records=records, dsf_monitor_id=monitor_id)
chain = DSFFailoverChain(label, record_sets=[record_set])
pool = DSFResponsePool(label, rs_chains=[chain])
pool.create(td)
return pool
def _mod_rulesets(self, td, change):
new = change.new
# Response Pools
pools = {}
# Get existing pools. This should be simple, but it's not b/c the dyn
# api is a POS. We need all response pools so we can GC and check to
# make sure that what we're after doesn't already exist.
# td.all_response_pools just returns thin objects that don't include
# their rs_chains (and children down to actual records.) We could just
# foreach over those turning them into full DSFResponsePool objects
# with get_response_pool, but that'd be N round-trips. We can avoid
# those round trips in cases where the pools are in use in rules where
# they're already full objects.
# First up, populate all the full pools we have under rules; the _
# prevents a td.refresh we don't need :-( seriously?
existing_rulesets = td._rulesets
for ruleset in existing_rulesets:
for pool in ruleset.response_pools:
pools[pool.response_pool_id] = pool
# Now we need to find any pools that aren't referenced by rules
for pool in td.all_response_pools:
rpid = pool.response_pool_id
if rpid not in pools:
# we want this one, but it's thin, inflate it
pools[rpid] = get_response_pool(rpid, td)
# now that we have full objects for the complete set of existing pools,
# a list will be more useful
pools = pools.values()
# Rulesets
# add the default
label = 'default:{}'.format(uuid4().hex)
ruleset = DSFRuleset(label, 'always', [])
ruleset.create(td, index=0)
pool = self._find_or_create_pool(td, pools, 'default', new._type,
new.values)
# There's no way in the client lib to create a ruleset with an existing
# pool (ref'd by id) so we have to do this in a roundabout way.
active_pools = {
'default': pool.response_pool_id
}
ruleset.add_response_pool(pool.response_pool_id)
monitor_id = self._traffic_director_monitor(new.fqdn).dsf_monitor_id
# Geos ordered least to most specific so that parents will always be
# created before their children (and thus can be referenced)
geos = sorted(new.geo.items(), key=lambda d: d[0])
for _, geo in geos:
if geo.subdivision_code:
criteria = {
'country': geo.country_code,
'province': geo.subdivision_code
}
elif geo.country_code:
criteria = {
'country': geo.country_code
}
else:
criteria = {
'region': self.REGION_CODES[geo.continent_code]
}
label = '{}:{}'.format(geo.code, uuid4().hex)
ruleset = DSFRuleset(label, 'geoip', [], {
'geoip': criteria
})
# Something you have to call create on; for others the constructor does it
ruleset.create(td, index=0)
first = geo.values[0]
pool = self._find_or_create_pool(td, pools, first, new._type,
geo.values, monitor_id)
active_pools[geo.code] = pool.response_pool_id
ruleset.add_response_pool(pool.response_pool_id)
# look for parent rulesets we can add in the chain
for code in geo.parents:
try:
pool_id = active_pools[code]
# looking at the client lib code, an index greater than what exists appends
ruleset.add_response_pool(pool_id, index=999)
except KeyError:
pass
# and always add default as the last
pool_id = active_pools['default']
ruleset.add_response_pool(pool_id, index=999)
# we're done with active_pools as a lookup, convert it into a set of
# the ids in use
active_pools = set(active_pools.values())
# Clean up unused response_pools
for pool in pools:
if pool.response_pool_id in active_pools:
continue
pool.delete()
# Clean out the old rulesets
for ruleset in existing_rulesets:
ruleset.delete()
def _mod_geo_Create(self, dyn_zone, change):
new = change.new
fqdn = new.fqdn
_type = new._type
label = '{}:{}'.format(fqdn, _type)
node = DSFNode(new.zone.name, fqdn)
td = TrafficDirector(label, ttl=new.ttl, nodes=[node], publish='Y')
self.log.debug('_mod_geo_Create: td=%s', td.service_id)
self._mod_rulesets(td, change)
self.traffic_directors[fqdn] = {
_type: td
}
def _mod_geo_Update(self, dyn_zone, change):
new = change.new
if not new.geo:
# New record doesn't have geo, so we're going from a TD to a regular
# record
self._mod_Create(dyn_zone, change)
self._mod_geo_Delete(dyn_zone, change)
return
try:
td = self.traffic_directors[new.fqdn][new._type]
except KeyError:
# There's no td, this is actually a create, we must be going from a
# non-geo to geo record so delete the regular record as well
self._mod_geo_Create(dyn_zone, change)
self._mod_Delete(dyn_zone, change)
return
self._mod_rulesets(td, change)
def _mod_geo_Delete(self, dyn_zone, change):
existing = change.existing
fqdn_tds = self.traffic_directors[existing.fqdn]
_type = existing._type
fqdn_tds[_type].delete()
del fqdn_tds[_type]
def _mod_Create(self, dyn_zone, change):
new = change.new
kwargs_for = getattr(self, '_kwargs_for_{}'.format(new._type))
for kwargs in kwargs_for(new):
dyn_zone.add_record(new.name, new._type, **kwargs)
def _mod_Delete(self, dyn_zone, change):
existing = change.existing
if existing.name:
target = '{}.{}'.format(existing.name, existing.zone.name[:-1])
else:
target = existing.zone.name[:-1]
_type = self.TYPE_TO_RECORDS[existing._type]
for rec in dyn_zone.get_all_records()[_type]:
if rec.fqdn == target:
rec.delete()
def _mod_Update(self, dyn_zone, change):
self._mod_Delete(dyn_zone, change)
self._mod_Create(dyn_zone, change)
def _apply_traffic_directors(self, desired, changes, dyn_zone):
self.log.debug('_apply_traffic_directors: zone=%s', desired.name)
unhandled_changes = []
for c in changes:
# we only mess with changes that have geo info somewhere
if getattr(c.new, 'geo', False) or getattr(c.existing, 'geo',
False):
mod = getattr(self, '_mod_geo_{}'.format(c.__class__.__name__))
mod(dyn_zone, c)
else:
unhandled_changes.append(c)
return unhandled_changes
def _apply_regular(self, desired, changes, dyn_zone):
self.log.debug('_apply_regular: zone=%s', desired.name)
for c in changes:
mod = getattr(self, '_mod_{}'.format(c.__class__.__name__))
mod(dyn_zone, c)
# TODO: detect "extra" changes when monitors are out of date or failover
# chains are wrong etc.
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
dyn_zone = _CachingDynZone.get(desired.name[:-1], create=True)
if self.traffic_directors_enabled:
# any changes left over don't involve geo
changes = self._apply_traffic_directors(desired, changes, dyn_zone)
self._apply_regular(desired, changes, dyn_zone)
dyn_zone.publish()

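The geo handling in _mod_rulesets above boils down to picking the most specific geoip criteria available for each geo value. A small standalone sketch of that mapping follows, not part of the imported file; GeoStub is a hypothetical stand-in for octodns' GeoValue and REGION_CODES mirrors the table above.

from collections import namedtuple

GeoStub = namedtuple('GeoStub', ('continent_code', 'country_code',
                                 'subdivision_code'))

# mirrors DynProvider.REGION_CODES above
REGION_CODES = {'NA': 11, 'SA': 12, 'EU': 13, 'AF': 14, 'AS': 15, 'OC': 16,
                'AN': 17}


def criteria_for(geo):
    # most specific wins: subdivision, then country, then continent
    if geo.subdivision_code:
        return {'country': geo.country_code,
                'province': geo.subdivision_code}
    elif geo.country_code:
        return {'country': geo.country_code}
    return {'region': REGION_CODES[geo.continent_code]}


print(criteria_for(GeoStub('NA', 'US', 'CA')))  # country + province
print(criteria_for(GeoStub('EU', None, None)))  # continent region code 13
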
+ 361
- 0
octodns/provider/powerdns.py View File

@ -0,0 +1,361 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from requests import HTTPError, Session
import logging
from ..record import Create, Record
from .base import BaseProvider
class PowerDnsBaseProvider(BaseProvider):
SUPPORTS_GEO = False
TIMEOUT = 5
def __init__(self, id, host, api_key, port=8081, *args, **kwargs):
super(PowerDnsBaseProvider, self).__init__(id, *args, **kwargs)
self.host = host
self.port = port
sess = Session()
sess.headers.update({'X-API-Key': api_key})
self._sess = sess
def _request(self, method, path, data=None):
self.log.debug('_request: method=%s, path=%s', method, path)
url = 'http://{}:{}/api/v1/servers/localhost/{}' \
.format(self.host, self.port, path)
resp = self._sess.request(method, url, json=data, timeout=self.TIMEOUT)
self.log.debug('_request: status=%d', resp.status_code)
resp.raise_for_status()
return resp
def _get(self, path, data=None):
return self._request('GET', path, data=data)
def _post(self, path, data=None):
return self._request('POST', path, data=data)
def _patch(self, path, data=None):
return self._request('PATCH', path, data=data)
def _data_for_multiple(self, rrset):
# TODO: geo not supported
return {
'type': rrset['type'],
'values': [r['content'] for r in rrset['records']],
'ttl': rrset['ttl']
}
_data_for_A = _data_for_multiple
_data_for_AAAA = _data_for_multiple
_data_for_NS = _data_for_multiple
def _data_for_single(self, rrset):
return {
'type': rrset['type'],
'value': rrset['records'][0]['content'],
'ttl': rrset['ttl']
}
_data_for_CNAME = _data_for_single
_data_for_PTR = _data_for_single
def _data_for_quoted(self, rrset):
return {
'type': rrset['type'],
'values': [r['content'][1:-1] for r in rrset['records']],
'ttl': rrset['ttl']
}
_data_for_SPF = _data_for_quoted
_data_for_TXT = _data_for_quoted
def _data_for_MX(self, rrset):
values = []
for record in rrset['records']:
priority, value = record['content'].split(' ', 1)
values.append({
'priority': priority,
'value': value,
})
return {
'type': rrset['type'],
'values': values,
'ttl': rrset['ttl']
}
def _data_for_NAPTR(self, rrset):
values = []
for record in rrset['records']:
order, preference, flags, service, regexp, replacement = \
record['content'].split(' ', 5)
values.append({
'order': order,
'preference': preference,
'flags': flags[1:-1],
'service': service[1:-1],
'regexp': regexp[1:-1],
'replacement': replacement,
})
return {
'type': rrset['type'],
'values': values,
'ttl': rrset['ttl']
}
def _data_for_SSHFP(self, rrset):
values = []
for record in rrset['records']:
algorithm, fingerprint_type, fingerprint = \
record['content'].split(' ', 2)
values.append({
'algorithm': algorithm,
'fingerprint_type': fingerprint_type,
'fingerprint': fingerprint,
})
return {
'type': rrset['type'],
'values': values,
'ttl': rrset['ttl']
}
def _data_for_SRV(self, rrset):
values = []
for record in rrset['records']:
priority, weight, port, target = \
record['content'].split(' ', 3)
values.append({
'priority': priority,
'weight': weight,
'port': port,
'target': target,
})
return {
'type': rrset['type'],
'values': values,
'ttl': rrset['ttl']
}
def populate(self, zone, target=False):
self.log.debug('populate: name=%s', zone.name)
resp = None
try:
resp = self._get('zones/{}'.format(zone.name))
self.log.debug('populate: loaded')
except HTTPError as e:
if e.response.status_code == 401:
# Nicer error message for auth problems
raise Exception('PowerDNS unauthorized host={}'
.format(self.host))
elif e.response.status_code == 422:
# 422 means powerdns doesn't know anything about the requested
# domain. We'll just ignore it here and leave the zone
# untouched.
pass
else:
# just re-throw
raise
before = len(zone.records)
if resp:
for rrset in resp.json()['rrsets']:
_type = rrset['type']
if _type == 'SOA':
continue
data_for = getattr(self, '_data_for_{}'.format(_type))
record_name = zone.hostname_from_fqdn(rrset['name'])
record = Record.new(zone, record_name, data_for(rrset),
source=self)
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _records_for_multiple(self, record):
return [{'content': v, 'disabled': False}
for v in record.values]
_records_for_A = _records_for_multiple
_records_for_AAAA = _records_for_multiple
_records_for_NS = _records_for_multiple
def _records_for_single(self, record):
return [{'content': record.value, 'disabled': False}]
_records_for_CNAME = _records_for_single
_records_for_PTR = _records_for_single
def _records_for_quoted(self, record):
return [{'content': '"{}"'.format(v), 'disabled': False}
for v in record.values]
_records_for_SPF = _records_for_quoted
_records_for_TXT = _records_for_quoted
def _records_for_MX(self, record):
return [{
'content': '{} {}'.format(v.priority, v.value),
'disabled': False
} for v in record.values]
def _records_for_NAPTR(self, record):
return [{
'content': '{} {} "{}" "{}" "{}" {}'.format(v.order, v.preference,
v.flags, v.service,
v.regexp,
v.replacement),
'disabled': False
} for v in record.values]
def _records_for_SSHFP(self, record):
return [{
'content': '{} {} {}'.format(v.algorithm, v.fingerprint_type,
v.fingerprint),
'disabled': False
} for v in record.values]
def _records_for_SRV(self, record):
return [{
'content': '{} {} {} {}'.format(v.priority, v.weight, v.port,
v.target),
'disabled': False
} for v in record.values]
def _mod_Create(self, change):
new = change.new
records_for = getattr(self, '_records_for_{}'.format(new._type))
return {
'name': new.fqdn,
'type': new._type,
'ttl': new.ttl,
'changetype': 'REPLACE',
'records': records_for(new)
}
_mod_Update = _mod_Create
def _mod_Delete(self, change):
existing = change.existing
records_for = getattr(self, '_records_for_{}'.format(existing._type))
return {
'name': existing.fqdn,
'type': existing._type,
'ttl': existing.ttl,
'changetype': 'DELETE',
'records': records_for(existing)
}
def _get_nameserver_record(self, existing):
return None
def _extra_changes(self, existing, _):
self.log.debug('_extra_changes: zone=%s', existing.name)
ns = self._get_nameserver_record(existing)
if not ns:
return []
# sorting mostly to make things deterministic for testing, but in
# theory it lets us find what we're after quicker (though sorting would
# be more expensive.)
for record in sorted(existing.records):
if record == ns:
# We've found the top-level NS record, return any changes
change = record.changes(ns, self)
self.log.debug('_extra_changes: change=%s', change)
if change:
# We need to modify an existing record
return [change]
# No change is necessary
return []
# No existing top-level NS
self.log.debug('_extra_changes: create')
return [Create(ns)]
def _get_error(self, http_error):
try:
return http_error.response.json()['error']
except Exception:
return ''
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
mods = []
for change in changes:
class_name = change.__class__.__name__
mods.append(getattr(self, '_mod_{}'.format(class_name))(change))
self.log.debug('_apply: sending change request')
try:
self._patch('zones/{}'.format(desired.name),
data={'rrsets': mods})
self.log.debug('_apply: patched')
except HTTPError as e:
error = self._get_error(e)
if e.response.status_code != 422 or \
not error.startswith('Could not find domain '):
self.log.error('_apply: status=%d, text=%s',
e.response.status_code,
e.response.text)
raise
self.log.info('_apply: creating zone=%s', desired.name)
# 422 means powerdns doesn't know anything about the requested
# domain. We'll try to create it with the correct records instead
# of update. Hopefully all the mods are creates :-)
data = {
'name': desired.name,
'kind': 'Master',
'masters': [],
'nameservers': [],
'rrsets': mods,
'soa_edit_api': 'INCEPTION-INCREMENT',
'serial': 0,
}
try:
self._post('zones', data)
except HTTPError as e:
self.log.error('_apply: status=%d, text=%s',
e.response.status_code,
e.response.text)
raise
self.log.debug('_apply: created')
self.log.debug('_apply: complete')
class PowerDnsProvider(PowerDnsBaseProvider):
def __init__(self, id, host, api_key, port=8081, nameserver_values=None,
nameserver_ttl=600, *args, **kwargs):
self.log = logging.getLogger('PowerDnsProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, host=%s, port=%d, '
'nameserver_values=%s, nameserver_ttl=%d',
id, host, port, nameserver_values, nameserver_ttl)
super(PowerDnsProvider, self).__init__(id, host=host, api_key=api_key,
port=port, *args, **kwargs)
self.nameserver_values = nameserver_values
self.nameserver_ttl = nameserver_ttl
def _get_nameserver_record(self, existing):
if self.nameserver_values:
return Record.new(existing, '', {
'type': 'NS',
'ttl': self.nameserver_ttl,
'values': self.nameserver_values,
}, source=self)
return super(PowerDnsProvider, self)._get_nameserver_record(existing)

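_mod_Create and _apply above ultimately PATCH an rrsets document against /api/v1/servers/localhost/zones/<zone>. A minimal sketch of that payload for a single A record follows, not part of the imported file; FakeA is a hypothetical stand-in for an octodns ARecord and the zone and addresses are illustrative only.

import json


class FakeA(object):
    # hypothetical stand-in for octodns.record.ARecord
    fqdn = 'www.unit.tests.'
    _type = 'A'
    ttl = 300
    values = ['1.2.3.4', '1.2.3.5']


def records_for_multiple(record):
    # mirrors _records_for_multiple above
    return [{'content': v, 'disabled': False} for v in record.values]


new = FakeA()
payload = {
    'rrsets': [{
        'name': new.fqdn,
        'type': new._type,
        'ttl': new.ttl,
        'changetype': 'REPLACE',
        'records': records_for_multiple(new),
    }]
}
print(json.dumps(payload, indent=2))
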
+ 651
- 0
octodns/provider/route53.py View File

@ -0,0 +1,651 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from boto3 import client
from collections import defaultdict
from incf.countryutils.transformations import cca_to_ctca2
from uuid import uuid4
import logging
import re
from ..record import Record, Update
from .base import BaseProvider
class _Route53Record(object):
def __init__(self, fqdn, _type, ttl, record=None, values=None, geo=None,
health_check_id=None):
self.fqdn = fqdn
self._type = _type
self.ttl = ttl
# From here on things are a little ugly, it works, but would be nice to
# clean up someday.
if record:
values_for = getattr(self, '_values_for_{}'.format(self._type))
self.values = values_for(record)
else:
self.values = values
self.geo = geo
self.health_check_id = health_check_id
self.is_geo_default = False
@property
def _geo_code(self):
return getattr(self.geo, 'code', '')
def _values_for_values(self, record):
return record.values
_values_for_A = _values_for_values
_values_for_AAAA = _values_for_values
_values_for_NS = _values_for_values
def _values_for_value(self, record):
return [record.value]
_values_for_CNAME = _values_for_value
_values_for_PTR = _values_for_value
def _values_for_MX(self, record):
return ['{} {}'.format(v.priority, v.value) for v in record.values]
def _values_for_NAPTR(self, record):
return ['{} {} "{}" "{}" "{}" {}'
.format(v.order, v.preference,
v.flags if v.flags else '',
v.service if v.service else '',
v.regexp if v.regexp else '',
v.replacement)
for v in record.values]
def _values_for_quoted(self, record):
return ['"{}"'.format(v.replace('"', '\\"'))
for v in record.values]
_values_for_SPF = _values_for_quoted
_values_for_TXT = _values_for_quoted
def _values_for_SRV(self, record):
return ['{} {} {} {}'.format(v.priority, v.weight, v.port,
v.target)
for v in record.values]
def mod(self, action):
rrset = {
'Name': self.fqdn,
'Type': self._type,
'TTL': self.ttl,
'ResourceRecords': [{'Value': v} for v in self.values],
}
if self.is_geo_default:
rrset['GeoLocation'] = {
'CountryCode': '*'
}
rrset['SetIdentifier'] = 'default'
elif self.geo:
geo = self.geo
rrset['SetIdentifier'] = geo.code
if self.health_check_id:
rrset['HealthCheckId'] = self.health_check_id
if geo.subdivision_code:
rrset['GeoLocation'] = {
'CountryCode': geo.country_code,
'SubdivisionCode': geo.subdivision_code
}
elif geo.country_code:
rrset['GeoLocation'] = {
'CountryCode': geo.country_code
}
else:
rrset['GeoLocation'] = {
'ContinentCode': geo.continent_code
}
return {
'Action': action,
'ResourceRecordSet': rrset,
}
# NOTE: we're using __hash__ and __cmp__ methods that consider
# _Route53Records equivalent if they have the same fqdn, _type, and
# geo.ident. Values are ignored. This is useful when computing
# diffs/changes.
def __hash__(self):
return '{}:{}:{}'.format(self.fqdn, self._type,
self._geo_code).__hash__()
def __cmp__(self, other):
return 0 if (self.fqdn == other.fqdn and
self._type == other._type and
self._geo_code == other._geo_code) else 1
def __repr__(self):
return '_Route53Record<{} {:>5} {:8} {}>' \
.format(self.fqdn, self._type, self._geo_code, self.values)
octal_re = re.compile(r'\\(\d\d\d)')
def _octal_replace(s):
# See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/
# DomainNameFormat.html
return octal_re.sub(lambda m: chr(int(m.group(1), 8)), s)
class Route53Provider(BaseProvider):
SUPPORTS_GEO = True
# This should be bumped when there are underlying changes made to the
# health check config.
HEALTH_CHECK_VERSION = '0000'
def __init__(self, id, access_key_id, secret_access_key, max_changes=1000,
*args, **kwargs):
self.max_changes = max_changes
self.log = logging.getLogger('Route53Provider[{}]'.format(id))
self.log.debug('__init__: id=%s, access_key_id=%s, '
'secret_access_key=***', id, access_key_id)
super(Route53Provider, self).__init__(id, *args, **kwargs)
self._conn = client('route53', aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
self._r53_zones = None
self._r53_rrsets = {}
self._health_checks = None
def supports(self, record):
return record._type != 'SSHFP'
@property
def r53_zones(self):
if self._r53_zones is None:
self.log.debug('r53_zones: loading')
zones = {}
more = True
start = {}
while more:
resp = self._conn.list_hosted_zones(**start)
for z in resp['HostedZones']:
zones[z['Name']] = z['Id']
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._r53_zones = zones
return self._r53_zones
def _get_zone_id(self, name, create=False):
self.log.debug('_get_zone_id: name=%s', name)
if name in self.r53_zones:
id = self.r53_zones[name]
self.log.debug('_get_zone_id: id=%s', id)
return id
if create:
ref = uuid4().hex
self.log.debug('_get_zone_id: no matching zone, creating, '
'ref=%s', ref)
resp = self._conn.create_hosted_zone(Name=name,
CallerReference=ref)
self.r53_zones[name] = id = resp['HostedZone']['Id']
return id
return None
def _parse_geo(self, rrset):
try:
loc = rrset['GeoLocation']
except KeyError:
# No geo loc
return
try:
return loc['ContinentCode']
except KeyError:
# Must be country
cc = loc['CountryCode']
if cc == '*':
# This is the default
return
cn = cca_to_ctca2(cc)
try:
return '{}-{}-{}'.format(cn, cc, loc['SubdivisionCode'])
except KeyError:
return '{}-{}'.format(cn, cc)
def _data_for_geo(self, rrset):
ret = {
'type': rrset['Type'],
'values': [v['Value'] for v in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
geo = self._parse_geo(rrset)
if geo:
ret['geo'] = geo
return ret
_data_for_A = _data_for_geo
_data_for_AAAA = _data_for_geo
def _data_for_single(self, rrset):
return {
'type': rrset['Type'],
'value': rrset['ResourceRecords'][0]['Value'],
'ttl': int(rrset['TTL'])
}
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
def _data_for_quoted(self, rrset):
return {
'type': rrset['Type'],
'values': [rr['Value'][1:-1] for rr in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
_data_for_TXT = _data_for_quoted
_data_for_SPF = _data_for_quoted
def _data_for_MX(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
priority, value = rr['Value'].split(' ')
values.append({
'priority': priority,
'value': value,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NAPTR(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
order, preference, flags, service, regexp, replacement = \
rr['Value'].split(' ')
flags = flags[1:-1]
service = service[1:-1]
regexp = regexp[1:-1]
values.append({
'order': order,
'preference': preference,
'flags': flags if flags else None,
'service': service if service else None,
'regexp': regexp if regexp else None,
'replacement': replacement if replacement else None,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _data_for_NS(self, rrset):
return {
'type': rrset['Type'],
'values': [v['Value'] for v in rrset['ResourceRecords']],
'ttl': int(rrset['TTL'])
}
def _data_for_SRV(self, rrset):
values = []
for rr in rrset['ResourceRecords']:
priority, weight, port, target = rr['Value'].split(' ')
values.append({
'priority': priority,
'weight': weight,
'port': port,
'target': target,
})
return {
'type': rrset['Type'],
'values': values,
'ttl': int(rrset['TTL'])
}
def _load_records(self, zone_id):
if zone_id not in self._r53_rrsets:
self.log.debug('_load_records: zone_id=%s loading', zone_id)
rrsets = []
more = True
start = {}
while more:
resp = \
self._conn.list_resource_record_sets(HostedZoneId=zone_id,
**start)
rrsets += resp['ResourceRecordSets']
more = resp['IsTruncated']
if more:
start = {
'StartRecordName': resp['NextRecordName'],
'StartRecordType': resp['NextRecordType'],
}
try:
start['StartRecordIdentifier'] = \
resp['NextRecordIdentifier']
except KeyError:
pass
self._r53_rrsets[zone_id] = rrsets
return self._r53_rrsets[zone_id]
def populate(self, zone, target=False):
self.log.debug('populate: name=%s', zone.name)
before = len(zone.records)
zone_id = self._get_zone_id(zone.name)
if zone_id:
records = defaultdict(lambda: defaultdict(list))
for rrset in self._load_records(zone_id):
record_name = zone.hostname_from_fqdn(rrset['Name'])
record_name = _octal_replace(record_name)
record_type = rrset['Type']
if record_type == 'SOA':
continue
data = getattr(self, '_data_for_{}'.format(record_type))(rrset)
records[record_name][record_type].append(data)
for name, types in records.items():
for _type, data in types.items():
if len(data) > 1:
# Multiple data indicates a record with GeoDNS, convert
# that data into the format we need
geo = {}
for d in data:
try:
geo[d['geo']] = d['values']
except KeyError:
primary = d
data = primary
data['geo'] = geo
else:
data = data[0]
record = Record.new(zone, name, data, source=self)
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _gen_mods(self, action, records):
'''
Turns `_Route53Record`s in to `change_resource_record_sets` `Changes`
'''
return [r.mod(action) for r in records]
@property
def health_checks(self):
if self._health_checks is None:
# need to do the first load
self.log.debug('health_checks: loading')
checks = {}
more = True
start = {}
while more:
resp = self._conn.list_health_checks(**start)
for health_check in resp['HealthChecks']:
# our format for CallerReference is dddd:hex-uuid
ref = health_check.get('CallerReference', 'xxxxx')
if len(ref) > 4 and ref[4] != ':':
# ignore anything else
continue
checks[health_check['Id']] = health_check
more = resp['IsTruncated']
start['Marker'] = resp.get('NextMarker', None)
self._health_checks = checks
# We've got a cached version, use it
return self._health_checks
def _get_health_check_id(self, record, ident, geo):
# fqdn & the first value are special, we use them to match up health
# checks to their records. Route53 health checks check a single ip and
# we're going to assume that ips are interchangeable to avoid
# health-checking each one independently
fqdn = record.fqdn
first_value = geo.values[0]
self.log.debug('_get_health_check_id: fqdn=%s, type=%s, geo=%s, '
'first_value=%s', fqdn, record._type, ident,
first_value)
# health check host can't end with a .
host = fqdn[:-1]
# we're looking for a healthcheck with the current version & our record
# type, we'll ignore anything else
expected_version_and_type = '{}:{}:'.format(self.HEALTH_CHECK_VERSION,
record._type)
for id, health_check in self.health_checks.items():
if not health_check['CallerReference'] \
.startswith(expected_version_and_type):
# not a version & type match, ignore
continue
config = health_check['HealthCheckConfig']
if host == config['FullyQualifiedDomainName'] and \
first_value == config['IPAddress']:
# this is the health check we're looking for
return id
# no existing matches, we need to create a new health check
config = {
'EnableSNI': True,
'FailureThreshold': 6,
'FullyQualifiedDomainName': host,
'IPAddress': first_value,
'MeasureLatency': True,
'Port': 443,
'RequestInterval': 10,
'ResourcePath': '/_dns',
'Type': 'HTTPS',
}
ref = '{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type,
uuid4().hex[:16])
resp = self._conn.create_health_check(CallerReference=ref,
HealthCheckConfig=config)
health_check = resp['HealthCheck']
id = health_check['Id']
# store the new health check so that we'll be able to find it in the
# future
self._health_checks[id] = health_check
self.log.info('_get_health_check_id: created id=%s, host=%s, '
'first_value=%s', id, host, first_value)
return id
def _gc_health_checks(self, record, new):
self.log.debug('_gc_health_checks: record=%s', record)
# Find the health checks we're using for the new route53 records
in_use = set()
for r in new:
if r.health_check_id:
in_use.add(r.health_check_id)
self.log.debug('_gc_health_checks: in_use=%s', in_use)
# Now we need to run through ALL the health checks looking for those
# that apply to this record, deleting any that do and are no longer in
# use
host = record.fqdn[:-1]
for id, health_check in self.health_checks.items():
config = health_check['HealthCheckConfig']
_type = health_check['CallerReference'].split(':', 2)[1]
# if host and the pulled out type match it applies
if host == config['FullyQualifiedDomainName'] and \
_type == record._type and id not in in_use:
# this is a health check for our fqdn & type but not one we're
# planning to use going forward
self.log.info('_gc_health_checks: deleting id=%s', id)
self._conn.delete_health_check(HealthCheckId=id)
def _gen_records(self, record, new=False):
'''
Turns an octodns.Record into one or more `_Route53Record`s
'''
records = set()
base = _Route53Record(record.fqdn, record._type, record.ttl,
record=record)
records.add(base)
if getattr(record, 'geo', False):
base.is_geo_default = True
for ident, geo in record.geo.items():
if new:
# only find health checks for records that are going to be
# around after the run
health_check_id = self._get_health_check_id(record, ident,
geo)
else:
health_check_id = None
records.add(_Route53Record(record.fqdn, record._type,
record.ttl, values=geo.values,
geo=geo,
health_check_id=health_check_id))
return records
def _mod_Create(self, change):
# New is the stuff that needs to be created
new_records = self._gen_records(change.new, True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
return self._gen_mods('CREATE', new_records)
def _mod_Update(self, change):
# See comments in _Route53Record for how the set math is made to do our
# bidding here.
existing_records = self._gen_records(change.existing)
new_records = self._gen_records(change.new, True)
# Now is a good time to clear out any unused health checks since we
# know what we'll be using going forward
self._gc_health_checks(change.new, new_records)
# Things in existing, but not new are deletes
deletes = existing_records - new_records
# Things in new, but not existing are the creates
creates = new_records - existing_records
# Things in both need updating, we could optimize this and filter out
# things that haven't actually changed, but that's for another day.
upserts = existing_records & new_records
return self._gen_mods('DELETE', deletes) + \
self._gen_mods('CREATE', creates) + \
self._gen_mods('UPSERT', upserts)
def _mod_Delete(self, change):
# Existing is the thing that needs to be deleted
existing_records = self._gen_records(change.existing)
# Now is a good time to clear out all the health checks since we know
# we're done with them
self._gc_health_checks(change.existing, [])
return self._gen_mods('DELETE', existing_records)
def _extra_changes(self, existing, changes):
self.log.debug('_extra_changes: existing=%s', existing.name)
zone_id = self._get_zone_id(existing.name)
if not zone_id:
# zone doesn't exist so no extras to worry about
return []
# we'll skip extra checking for anything we're already going to change
changed = set([c.record for c in changes])
# ok, now it's time for the reason we're here, we need to go over all
# the existing records
extra = []
for record in existing.records:
if record in changed:
# already have a change for it, skipping
continue
if not getattr(record, 'geo', False):
# record doesn't support geo, we don't need to inspect it
continue
# OK this is a record we don't have change for that does have geo
# information. We need to look and see if it needs to be updated
# b/c of a health check version bump
self.log.debug('_extra_changes: inspecting=%s, %s', record.fqdn,
record._type)
fqdn = record.fqdn
# loop through all the r53 rrsets
for rrset in self._load_records(zone_id):
if fqdn != rrset['Name'] or record._type != rrset['Type']:
# not a name and type match
continue
if rrset.get('GeoLocation', {}) \
.get('CountryCode', False) == '*':
# it's a default record
continue
# we expect a health check now
try:
health_check_id = rrset['HealthCheckId']
caller_ref = \
self.health_checks[health_check_id]['CallerReference']
if caller_ref.startswith(self.HEALTH_CHECK_VERSION):
# it has the right health check
continue
except KeyError:
# no health check id or one that isn't the right version
pass
# no good, doesn't have the right health check, needs an update
self.log.debug('_extra_changes: health-check caused '
'update')
extra.append(Update(record, record))
# We don't need to process this record any longer
break
return extra
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.info('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
batch = []
batch_rs_count = 0
zone_id = self._get_zone_id(desired.name, True)
for c in changes:
mods = getattr(self, '_mod_{}'.format(c.__class__.__name__))(c)
mods_rs_count = sum(
[len(m['ResourceRecordSet']['ResourceRecords']) for m in mods]
)
if mods_rs_count > self.max_changes:
# a single mod resulted in too many ResourceRecords changes
raise Exception('Too many modifications: {}'
.format(mods_rs_count))
# r53 limits changesets to 1000 entries
if (batch_rs_count + mods_rs_count) < self.max_changes:
# append to the batch
batch += mods
batch_rs_count += mods_rs_count
else:
self.log.info('_apply: sending change request for batch of '
'%d mods, %d ResourceRecords', len(batch),
batch_rs_count)
# send the batch
self._really_apply(batch, zone_id)
# start a new batch with the leftovers
batch = mods
batch_rs_count = mods_rs_count
# the way the above process works there will always be something left
# over in batch to process. Whenever we submitted a batch above it was
# because something pushed us over max_changes, leaving the remainder
# to submit here.
self.log.info('_apply: sending change request for batch of %d mods,'
' %d ResourceRecords', len(batch),
batch_rs_count)
self._really_apply(batch, zone_id)
def _really_apply(self, batch, zone_id):
uuid = uuid4().hex
batch = {
'Comment': 'Change: {}'.format(uuid),
'Changes': batch,
}
self.log.debug('_really_apply: sending change request, comment=%s',
batch['Comment'])
resp = self._conn.change_resource_record_sets(
HostedZoneId=zone_id, ChangeBatch=batch)
self.log.debug('_really_apply: change info=%s', resp['ChangeInfo'])

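The NOTE on _Route53Record above is what makes _mod_Update work: records hash and compare on (fqdn, _type, geo code) with values ignored, so plain set operations split an update into deletes, creates, and upserts. A tiny sketch using bare tuples as stand-ins for _Route53Record identities, not part of the imported file:

existing = {
    ('foo.unit.tests.', 'A', ''),       # geo default
    ('foo.unit.tests.', 'A', 'NA-US'),
    ('foo.unit.tests.', 'A', 'EU'),
}
new = {
    ('foo.unit.tests.', 'A', ''),
    ('foo.unit.tests.', 'A', 'NA-US'),
    ('foo.unit.tests.', 'A', 'AF'),
}

deletes = existing - new   # only in existing -> DELETE mods
creates = new - existing   # only in new -> CREATE mods
upserts = existing & new   # in both -> UPSERT mods (values may have changed)

print(sorted(deletes))  # [('foo.unit.tests.', 'A', 'EU')]
print(sorted(creates))  # [('foo.unit.tests.', 'A', 'AF')]
print(sorted(upserts))
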
+ 82
- 0
octodns/provider/yaml.py View File

@ -0,0 +1,82 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from os import makedirs
from os.path import isdir, join
import logging
from ..record import Record
from ..yaml import safe_load, safe_dump
from .base import BaseProvider
class YamlProvider(BaseProvider):
SUPPORTS_GEO = True
def __init__(self, id, directory, default_ttl=3600, *args, **kwargs):
self.log = logging.getLogger('YamlProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, directory=%s, default_ttl=%d', id,
directory, default_ttl)
super(YamlProvider, self).__init__(id, *args, **kwargs)
self.directory = directory
self.default_ttl = default_ttl
def populate(self, zone, target=False):
self.log.debug('populate: zone=%s, target=%s', zone.name, target)
if target:
# When acting as a target we ignore any existing records so that we
# create a completely new copy
return
before = len(zone.records)
filename = join(self.directory, '{}yaml'.format(zone.name))
with open(filename, 'r') as fh:
yaml_data = safe_load(fh)
if yaml_data:
for name, data in yaml_data.items():
if not isinstance(data, list):
data = [data]
for d in data:
if 'ttl' not in d:
d['ttl'] = self.default_ttl
record = Record.new(zone, name, d, source=self)
zone.add_record(record)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
# Since we don't have existing we'll only see creates
records = [c.new for c in changes]
# Order things alphabetically (records sort that way)
records.sort()
data = defaultdict(list)
for record in records:
d = record.data
d['type'] = record._type
if record.ttl == self.default_ttl:
# ttl is the default, we don't need to store it
del d['ttl']
data[record.name].append(d)
# Flatten single element lists
for k in data.keys():
if len(data[k]) == 1:
data[k] = data[k][0]
if not isdir(self.directory):
makedirs(self.directory)
filename = join(self.directory, '{}yaml'.format(desired.name))
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(dict(data), fh)

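populate and _apply above define the on-disk zone file shape: record names key the data, a bare mapping is a single record, a list holds several, and a ttl equal to default_ttl is omitted. A short sketch of reading that shape with plain PyYAML rather than the octodns.yaml wrapper, not part of the imported file; the records themselves are illustrative only.

import yaml

ZONE_YAML = '''
'':
  type: A
  values:
    - 1.2.3.4
    - 1.2.3.5
www:
  type: CNAME
  value: unit.tests.
mail:
  - type: MX
    ttl: 300
    values:
      - priority: 10
        value: mx1.unit.tests.
'''

default_ttl = 3600
for name, data in sorted(yaml.safe_load(ZONE_YAML).items()):
    if not isinstance(data, list):
        data = [data]
    for d in data:
        # a missing ttl falls back to the provider's default_ttl
        d.setdefault('ttl', default_ttl)
        print(name, d)
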
+ 549
- 0
octodns/record.py View File

@ -0,0 +1,549 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from ipaddress import IPv4Address, IPv6Address
from logging import getLogger
import re
class Change(object):
def __init__(self, existing, new):
self.existing = existing
self.new = new
@property
def record(self):
'Returns new if we have one, existing otherwise'
return self.new or self.existing
class Create(Change):
def __init__(self, new):
super(Create, self).__init__(None, new)
def __repr__(self, leader=''):
source = self.new.source.id if self.new.source else ''
return 'Create {} ({})'.format(self.new, source)
class Update(Change):
# Leader is just to allow us to work around heaven eating leading whitespace
# in our output. When we call this from the Manager.sync plan summary
# section we'll pass in a leader, otherwise we'll just let it default and
# do nothing
def __repr__(self, leader=''):
source = self.new.source.id if self.new.source else ''
return 'Update\n{leader} {existing} ->\n{leader} {new} ({src})' \
.format(existing=self.existing, new=self.new, leader=leader,
src=source)
class Delete(Change):
def __init__(self, existing):
super(Delete, self).__init__(existing, None)
def __repr__(self, leader=''):
return 'Delete {}'.format(self.existing)
_unescaped_semicolon_re = re.compile(r'\w;')
class Record(object):
log = getLogger('Record')
@classmethod
def new(cls, zone, name, data, source=None):
try:
_type = data['type']
except KeyError:
fqdn = '{}.{}'.format(name, zone.name) if name else zone.name
raise Exception('Invalid record {}, missing type'.format(fqdn))
try:
_type = {
'A': ARecord,
'AAAA': AaaaRecord,
# alias
# cert
'CNAME': CnameRecord,
# dhcid
# dname
# dnskey
# ds
# ipseckey
# key
# kx
# loc
'MX': MxRecord,
'NAPTR': NaptrRecord,
'NS': NsRecord,
# nsap
'PTR': PtrRecord,
# px
# rp
# soa - would it even make sense?
'SPF': SpfRecord,
'SRV': SrvRecord,
'SSHFP': SshfpRecord,
'TXT': TxtRecord,
# url
}[_type]
except KeyError:
raise Exception('Unknown record type: "{}"'.format(_type))
return _type(zone, name, data, source=source)
def __init__(self, zone, name, data, source=None):
self.log.debug('__init__: zone.name=%s, type=%11s, name=%s', zone.name,
self.__class__.__name__, name)
self.zone = zone
# force everything lower-case just to be safe
self.name = str(name).lower() if name else name
try:
self.ttl = int(data['ttl'])
except KeyError:
raise Exception('Invalid record {}, missing ttl'.format(self.fqdn))
self.source = source
def _data(self):
return {'ttl': self.ttl}
@property
def data(self):
return self._data()
@property
def fqdn(self):
if self.name:
return '{}.{}'.format(self.name, self.zone.name)
return self.zone.name
def changes(self, other, target):
# We're assuming we have the same name and type if we're being compared
if self.ttl != other.ttl:
return Update(self, other)
# NOTE: we're using __hash__ and __cmp__ methods that consider Records
# equivalent if they have the same name & _type. Values are ignored. This
# is useful when computing diffs/changes.
def __hash__(self):
return '{}:{}'.format(self.name, self._type).__hash__()
def __cmp__(self, other):
a = '{}:{}'.format(self.name, self._type)
b = '{}:{}'.format(other.name, other._type)
return cmp(a, b)
def __repr__(self):
# Make sure this is always overridden
raise NotImplementedError('Abstract base class, __repr__ required')
class GeoValue(object):
geo_re = re.compile(r'^(?P<continent_code>\w\w)(-(?P<country_code>\w\w)'
r'(-(?P<subdivision_code>\w\w))?)?$')
def __init__(self, geo, values):
match = self.geo_re.match(geo)
if not match:
raise Exception('Invalid geo "{}"'.format(geo))
self.code = geo
self.continent_code = match.group('continent_code')
self.country_code = match.group('country_code')
self.subdivision_code = match.group('subdivision_code')
self.values = values
@property
def parents(self):
bits = self.code.split('-')[:-1]
while bits:
yield '-'.join(bits)
bits.pop()
def __cmp__(self, other):
return 0 if (self.continent_code == other.continent_code and
self.country_code == other.country_code and
self.subdivision_code == other.subdivision_code and
self.values == other.values) else 1
def __repr__(self):
return "'Geo {} {} {} {}'".format(self.continent_code,
self.country_code,
self.subdivision_code, self.values)
class _ValuesMixin(object):
def __init__(self, zone, name, data, source=None):
super(_ValuesMixin, self).__init__(zone, name, data, source=source)
try:
self.values = sorted(self._process_values(data['values']))
except KeyError:
try:
self.values = self._process_values([data['value']])
except KeyError:
raise Exception('Invalid record {}, missing value(s)'
.format(self.fqdn))
def changes(self, other, target):
if self.values != other.values:
return Update(self, other)
return super(_ValuesMixin, self).changes(other, target)
def _data(self):
ret = super(_ValuesMixin, self)._data()
if len(self.values) > 1:
ret['values'] = [getattr(v, 'data', v) for v in self.values]
else:
v = self.values[0]
ret['value'] = getattr(v, 'data', v)
return ret
def __repr__(self):
return '<{} {} {}, {}, {}>'.format(self.__class__.__name__,
self._type, self.ttl,
self.fqdn, self.values)
class _GeoMixin(_ValuesMixin):
'''
Adds GeoDNS support to a record.
Must be included before `Record`.
'''
# TODO: move away from "data" hash to strict params, it's kind of leaking
# the yaml implementation into here and then forcing it back out into
# non-yaml providers during input
def __init__(self, zone, name, data, *args, **kwargs):
super(_GeoMixin, self).__init__(zone, name, data, *args, **kwargs)
try:
self.geo = dict(data['geo'])
except KeyError:
self.geo = {}
for k, vs in self.geo.items():
vs = sorted(self._process_values(vs))
self.geo[k] = GeoValue(k, vs)
def _data(self):
ret = super(_GeoMixin, self)._data()
if self.geo:
geo = {}
for code, value in self.geo.items():
geo[code] = value.values
ret['geo'] = geo
return ret
def changes(self, other, target):
if target.SUPPORTS_GEO:
if self.geo != other.geo:
return Update(self, other)
return super(_GeoMixin, self).changes(other, target)
def __repr__(self):
if self.geo:
return '<{} {} {}, {}, {}, {}>'.format(self.__class__.__name__,
self._type, self.ttl,
self.fqdn, self.values,
self.geo)
return super(_GeoMixin, self).__repr__()
class ARecord(_GeoMixin, Record):
_type = 'A'
def _process_values(self, values):
for ip in values:
try:
IPv4Address(unicode(ip))
except Exception:
raise Exception('Invalid record {}, value {} not a valid ip'
.format(self.fqdn, ip))
return values
class AaaaRecord(_GeoMixin, Record):
_type = 'AAAA'
def _process_values(self, values):
ret = []
for ip in values:
try:
IPv6Address(unicode(ip))
ret.append(ip.lower())
except Exception:
raise Exception('Invalid record {}, value {} not a valid ip'
.format(self.fqdn, ip))
return ret
class _ValueMixin(object):
def __init__(self, zone, name, data, source=None):
super(_ValueMixin, self).__init__(zone, name, data, source=source)
try:
self.value = self._process_value(data['value'])
except KeyError:
raise Exception('Invalid record {}, missing value'
.format(self.fqdn))
def changes(self, other, target):
if self.value != other.value:
return Update(self, other)
return super(_ValueMixin, self).changes(other, target)
def _data(self):
ret = super(_ValueMixin, self)._data()
ret['value'] = getattr(self.value, 'data', self.value)
return ret
def __repr__(self):
return '<{} {} {}, {}, {}>'.format(self.__class__.__name__,
self._type, self.ttl,
self.fqdn, self.value)
class CnameRecord(_ValueMixin, Record):
_type = 'CNAME'
def _process_value(self, value):
if not value.endswith('.'):
raise Exception('Invalid record {}, value {} missing trailing .'
.format(self.fqdn, value))
return value.lower()
class MxValue(object):
def __init__(self, value):
# TODO: rename preference
self.priority = int(value['priority'])
# TODO: rename to exchange?
self.value = value['value'].lower()
@property
def data(self):
return {
'priority': self.priority,
'value': self.value,
}
def __cmp__(self, other):
if self.priority == other.priority:
return cmp(self.value, other.value)
return cmp(self.priority, other.priority)
def __repr__(self):
return "'{} {}'".format(self.priority, self.value)
class MxRecord(_ValuesMixin, Record):
_type = 'MX'
def _process_values(self, values):
ret = []
for value in values:
try:
ret.append(MxValue(value))
except KeyError as e:
raise Exception('Invalid value in record {}, missing {}'
.format(self.fqdn, e.args[0]))
return ret
class NaptrValue(object):
def __init__(self, value):
self.order = int(value['order'])
self.preference = int(value['preference'])
self.flags = value['flags']
self.service = value['service']
self.regexp = value['regexp']
self.replacement = value['replacement']
@property
def data(self):
return {
'order': self.order,
'preference': self.preference,
'flags': self.flags,
'service': self.service,
'regexp': self.regexp,
'replacement': self.replacement,
}
def __cmp__(self, other):
if self.order != other.order:
return cmp(self.order, other.order)
elif self.preference != other.preference:
return cmp(self.preference, other.preference)
elif self.flags != other.flags:
return cmp(self.flags, other.flags)
elif self.service != other.service:
return cmp(self.service, other.service)
elif self.regexp != other.regexp:
return cmp(self.regexp, other.regexp)
return cmp(self.replacement, other.replacement)
def __repr__(self):
flags = self.flags if self.flags is not None else ''
service = self.service if self.service is not None else ''
regexp = self.regexp if self.regexp is not None else ''
return "'{} {} \"{}\" \"{}\" \"{}\" {}'" \
.format(self.order, self.preference, flags, service, regexp,
self.replacement)
class NaptrRecord(_ValuesMixin, Record):
_type = 'NAPTR'
def _process_values(self, values):
ret = []
for value in values:
try:
ret.append(NaptrValue(value))
except KeyError as e:
raise Exception('Invalid value in record {}, missing {}'
.format(self.fqdn, e.args[0]))
return ret
class NsRecord(_ValuesMixin, Record):
_type = 'NS'
def _process_values(self, values):
ret = []
for ns in values:
if not ns.endswith('.'):
raise Exception('Invalid record {}, value {} missing '
'trailing .'.format(self.fqdn, ns))
ret.append(ns.lower())
return ret
class PtrRecord(_ValueMixin, Record):
_type = 'PTR'
def _process_value(self, value):
if not value.endswith('.'):
raise Exception('Invalid record {}, value {} missing trailing .'
.format(self.fqdn, value))
return value.lower()
class SshfpValue(object):
def __init__(self, value):
self.algorithm = int(value['algorithm'])
self.fingerprint_type = int(value['fingerprint_type'])
self.fingerprint = value['fingerprint']
@property
def data(self):
return {
'algorithm': self.algorithm,
'fingerprint_type': self.fingerprint_type,
'fingerprint': self.fingerprint,
}
def __cmp__(self, other):
if self.algorithm != other.algorithm:
return cmp(self.algorithm, other.algorithm)
elif self.fingerprint_type != other.fingerprint_type:
return cmp(self.fingerprint_type, other.fingerprint_type)
return cmp(self.fingerprint, other.fingerprint)
def __repr__(self):
return "'{} {} {}'".format(self.algorithm, self.fingerprint_type,
self.fingerprint)
class SshfpRecord(_ValuesMixin, Record):
_type = 'SSHFP'
def _process_values(self, values):
ret = []
for value in values:
try:
ret.append(SshfpValue(value))
except KeyError as e:
raise Exception('Invalid value in record {}, missing {}'
.format(self.fqdn, e.args[0]))
return ret
class SpfRecord(_ValuesMixin, Record):
_type = 'SPF'
def _process_values(self, values):
return values
class SrvValue(object):
def __init__(self, value):
self.priority = int(value['priority'])
self.weight = int(value['weight'])
self.port = int(value['port'])
self.target = value['target'].lower()
@property
def data(self):
return {
'priority': self.priority,
'weight': self.weight,
'port': self.port,
'target': self.target,
}
def __cmp__(self, other):
if self.priority != other.priority:
return cmp(self.priority, other.priority)
elif self.weight != other.weight:
return cmp(self.weight, other.weight)
elif self.port != other.port:
return cmp(self.port, other.port)
return cmp(self.target, other.target)
def __repr__(self):
return "'{} {} {} {}'".format(self.priority, self.weight, self.port,
self.target)
class SrvRecord(_ValuesMixin, Record):
_type = 'SRV'
_name_re = re.compile(r'^_[^\.]+\.[^\.]+')
def __init__(self, zone, name, data, source=None):
if not self._name_re.match(name):
raise Exception('Invalid name {}.{}'.format(name, zone.name))
super(SrvRecord, self).__init__(zone, name, data, source)
def _process_values(self, values):
ret = []
for value in values:
try:
ret.append(SrvValue(value))
except KeyError as e:
raise Exception('Invalid value in record {}, missing {}'
.format(self.fqdn, e.args[0]))
return ret
class TxtRecord(_ValuesMixin, Record):
_type = 'TXT'
def _process_values(self, values):
for value in values:
if _unescaped_semicolon_re.search(value):
raise Exception('Invalid record {}, unescaped ;'
.format(self.fqdn))
return values
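
As a quick illustration of the value classes above, here is a minimal sketch of building a couple of these record types through Record.new; the zone name and values are arbitrary examples chosen for the sketch, not part of the import.

from octodns.record import Record
from octodns.zone import Zone

zone = Zone('unit.tests.', [])

# MX values are dicts with priority/value keys, mirroring the YAML config format
mx = Record.new(zone, 'mx', {
    'ttl': 300,
    'type': 'MX',
    'values': [
        {'priority': 10, 'value': 'smtp-1.unit.tests.'},
        {'priority': 20, 'value': 'smtp-2.unit.tests.'},
    ],
})

# SRV names must look like _service._proto, enforced by SrvRecord above
srv = Record.new(zone, '_sip._tcp', {
    'ttl': 600,
    'type': 'SRV',
    'values': [
        {'priority': 10, 'weight': 20, 'port': 5060,
         'target': 'sip.unit.tests.'},
    ],
})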

+ 6
- 0
octodns/source/__init__.py View File

@ -0,0 +1,6 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals

+ 33
- 0
octodns/source/base.py View File

@ -0,0 +1,33 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
class BaseSource(object):
def __init__(self, id):
self.id = id
if not getattr(self, 'log', False):
raise NotImplementedError('Abstract base class, log property '
'missing')
if not hasattr(self, 'SUPPORTS_GEO'):
raise NotImplementedError('Abstract base class, SUPPORTS_GEO '
'property missing')
def populate(self, zone, target=False):
'''
        Loads all records the source knows about for the provided zone
'''
raise NotImplementedError('Abstract base class, populate method '
'missing')
def supports(self, record):
        # Unless overridden and handled appropriately we'll assume that all
# record types are supported
return True
def __repr__(self):
return self.__class__.__name__
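
To show what the checks in BaseSource.__init__ expect of a subclass, here is a minimal sketch of a concrete source; the class and the record it emits are hypothetical, purely for illustration.

from logging import getLogger

from octodns.record import Record
from octodns.source.base import BaseSource


class StaticSource(BaseSource):
    SUPPORTS_GEO = False

    def __init__(self, id):
        # log must be set before calling up, BaseSource.__init__ checks for it
        self.log = getLogger('StaticSource[{}]'.format(id))
        super(StaticSource, self).__init__(id)

    def populate(self, zone, target=False):
        # Hand the zone a single, hard-coded A record
        zone.add_record(Record.new(zone, 'www', {
            'ttl': 300,
            'type': 'A',
            'value': '1.2.3.4',
        }, source=self))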

+ 208
- 0
octodns/source/tinydns.py View File

@ -0,0 +1,208 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import defaultdict
from ipaddress import ip_address
from os import listdir
from os.path import join
import logging
import re
from ..record import Record
from ..zone import DuplicateRecordException, SubzoneRecordException
from .base import BaseSource
class TinyDnsSource(BaseSource):
SUPPORTS_GEO = False
split_re = re.compile(r':+')
def __init__(self, id, default_ttl=3600):
super(TinyDnsSource, self).__init__(id)
self.default_ttl = default_ttl
def _data_for_A(self, _type, records):
values = []
for record in records:
if record[0] != '0.0.0.0':
values.append(record[0])
if len(values) == 0:
return
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': values,
}
def _data_for_CNAME(self, _type, records):
first = records[0]
try:
ttl = first[1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'value': '{}.'.format(first[0])
}
def _data_for_MX(self, _type, records):
try:
ttl = records[0][2]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': [{
'priority': r[1],
'value': '{}.'.format(r[0])
} for r in records]
}
def _data_for_NS(self, _type, records):
try:
ttl = records[0][1]
except IndexError:
ttl = self.default_ttl
return {
'ttl': ttl,
'type': _type,
'values': ['{}.'.format(r[0]) for r in records]
}
def populate(self, zone, target=False):
self.log.debug('populate: zone=%s', zone.name)
before = len(zone.records)
if zone.name.endswith('in-addr.arpa.'):
self._populate_in_addr_arpa(zone)
else:
self._populate_normal(zone)
self.log.info('populate: found %s records',
len(zone.records) - before)
def _populate_normal(self, zone):
type_map = {
'=': 'A',
'^': None,
'.': 'NS',
'C': 'CNAME',
'+': 'A',
'@': 'MX',
}
name_re = re.compile('((?P<name>.+)\.)?{}$'.format(zone.name[:-1]))
data = defaultdict(lambda: defaultdict(list))
for line in self._lines():
_type = line[0]
if _type not in type_map:
# Something we don't care about
continue
_type = type_map[_type]
if not _type:
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
match = name_re.match(line[0])
if not match:
continue
name = zone.hostname_from_fqdn(line[0])
data[name][_type].append(line[1:])
        for name, types in data.items():
            for _type, d in types.items():
                data_for = getattr(self, '_data_for_{}'.format(_type))
                # use a separate name so we don't shadow the outer data dict
                record_data = data_for(_type, d)
                if record_data:
                    record = Record.new(zone, name, record_data, source=self)
try:
zone.add_record(record)
except SubzoneRecordException:
self.log.debug('_populate_normal: skipping subzone '
'record=%s', record)
def _populate_in_addr_arpa(self, zone):
name_re = re.compile('(?P<name>.+)\.{}$'.format(zone.name[:-1]))
for line in self._lines():
_type = line[0]
# We're only interested in = (A+PTR), and ^ (PTR) records
if _type not in ('=', '^'):
continue
# Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
# Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in self.split_re.split(line)]
if line[0].endswith('in-addr.arpa'):
# since it's already in in-addr.arpa format
match = name_re.match(line[0])
value = '{}.'.format(line[1])
else:
addr = ip_address(line[1])
match = name_re.match(addr.reverse_pointer)
value = '{}.'.format(line[0])
if match:
try:
ttl = line[2]
except IndexError:
ttl = self.default_ttl
name = match.group('name')
record = Record.new(zone, name, {
'ttl': ttl,
'type': 'PTR',
'value': value
}, source=self)
try:
zone.add_record(record)
except DuplicateRecordException:
                    self.log.warning('Duplicate PTR record for %s, skipping',
                                     record.fqdn)
class TinyDnsFileSource(TinyDnsSource):
'''
A basic TinyDNS zonefile importer created to import legacy data.
NOTE: timestamps & lo fields are ignored if present.
'''
def __init__(self, id, directory, default_ttl=3600):
self.log = logging.getLogger('TinyDnsFileSource[{}]'.format(id))
self.log.debug('__init__: id=%s, directory=%s, default_ttl=%d', id,
directory, default_ttl)
super(TinyDnsFileSource, self).__init__(id, default_ttl)
self.directory = directory
self._cache = None
def _lines(self):
if self._cache is None:
            # We don't know which files hold what since tinydns data can be
            # defined anywhere, so we just read every file in the directory
lines = []
for filename in listdir(self.directory):
if filename[0] == '.':
# Ignore hidden files
continue
with open(join(self.directory, filename), 'r') as fh:
lines += filter(lambda l: l, fh.read().split('\n'))
self._cache = lines
return self._cache
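
A short usage sketch: point the file source at a directory of tinydns data files and let it populate a zone. The directory path here is hypothetical.

from octodns.source.tinydns import TinyDnsFileSource
from octodns.zone import Zone

# './tinydns-data' stands in for wherever the legacy data files live
source = TinyDnsFileSource('tiny', './tinydns-data', default_ttl=300)

zone = Zone('unit.tests.', [])
source.populate(zone)
for record in zone.records:
    print(record)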

+ 79
- 0
octodns/yaml.py View File

@ -0,0 +1,79 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from yaml import SafeDumper, SafeLoader, load, dump
from yaml.constructor import ConstructorError
import re
# zero-padded sort, simplified version of
# https://www.xormedia.com/natural-sort-order-with-zero-padding/
_pad_re = re.compile('\d+')
def _zero_pad(match):
return '{:04d}'.format(int(match.group(0)))
def _zero_padded_numbers(s):
    try:
        # purely numeric keys sort by their value
        return int(s)
    except ValueError:
        return _pad_re.sub(_zero_pad, s)
# Found http://stackoverflow.com/a/21912744 which guided me on how to hook in
# here
class SortEnforcingLoader(SafeLoader):
def __init__(self, *args, **kwargs):
super(SortEnforcingLoader, self).__init__(*args, **kwargs)
self.add_constructor(self.DEFAULT_MAPPING_TAG, self._construct)
def _construct(self, _, node):
self.flatten_mapping(node)
ret = self.construct_pairs(node)
keys = [d[0] for d in ret]
if keys != sorted(keys, key=_zero_padded_numbers):
raise ConstructorError(None, None, "keys out of order: {}"
.format(', '.join(keys)), node.start_mark)
return dict(ret)
def safe_load(stream, enforce_order=True):
return load(stream, SortEnforcingLoader if enforce_order else SafeLoader)
class SortingDumper(SafeDumper):
'''
This sorts keys alphanumerically in a "natural" manner where things with
the number 2 come before the number 12.
See https://www.xormedia.com/natural-sort-order-with-zero-padding/ for
more info
'''
def __init__(self, *args, **kwargs):
super(SortingDumper, self).__init__(*args, **kwargs)
self.add_representer(dict, self._representer)
def _representer(self, _, data):
data = data.items()
data.sort(key=lambda d: _zero_padded_numbers(d[0]))
return self.represent_mapping(self.DEFAULT_MAPPING_TAG, data)
def safe_dump(data, fh, **options):
kwargs = {
'canonical': False,
'indent': 2,
'default_style': '',
'default_flow_style': False,
'explicit_start': True
}
kwargs.update(options)
dump(data, fh, SortingDumper, **kwargs)
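
A brief sketch of how these two halves behave; the results noted in comments are what I'd expect from the code above, not captured output.

import sys

from yaml.constructor import ConstructorError

from octodns.yaml import safe_dump, safe_load

# Out-of-order keys are rejected when enforcement is on (the default)
try:
    safe_load('xyz: 1\nabc: 2\n')
except ConstructorError as e:
    print(e)  # keys out of order: xyz, abc

# The dumper applies the same natural ordering, so '2' lands before '12'
safe_dump({'12': 'twelve', '2': 'two'}, sys.stdout)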

+ 117
- 0
octodns/zone.py View File

@ -0,0 +1,117 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
import re
from .record import Create, Delete
class SubzoneRecordException(Exception):
pass
class DuplicateRecordException(Exception):
pass
def _is_eligible(record):
# Should this record be considered when computing changes
# We ignore all top-level NS records
return record._type != 'NS' or record.name != ''
class Zone(object):
log = getLogger('Zone')
def __init__(self, name, sub_zones):
if not name[-1] == '.':
raise Exception('Invalid zone name {}, missing ending dot'
.format(name))
        # Force everything to lowercase just to be safe
self.name = str(name).lower() if name else name
self.sub_zones = sub_zones
self.records = set()
# optional leading . to match empty hostname
# optional trailing . b/c some sources don't have it on their fqdn
self._name_re = re.compile('\.?{}?$'.format(name))
self.log.debug('__init__: zone=%s, sub_zones=%s', self, sub_zones)
def hostname_from_fqdn(self, fqdn):
return self._name_re.sub('', fqdn)
def add_record(self, record):
name = record.name
last = name.split('.')[-1]
if last in self.sub_zones:
if name != last:
# it's a record for something under a sub-zone
raise SubzoneRecordException('Record {} is under a '
'managed subzone'
.format(record.fqdn))
            elif record._type != 'NS':
                # It's a non-NS record for exactly a sub-zone
                raise SubzoneRecordException('Record {} is a managed sub-zone '
                                             'and not of type NS'
                                             .format(record.fqdn))
if record in self.records:
raise DuplicateRecordException('Duplicate record {}, type {}'
.format(record.fqdn, record._type))
self.records.add(record)
def changes(self, desired, target):
self.log.debug('changes: zone=%s, target=%s', self, target)
# Build up a hash of the desired records, thanks to our special
# __hash__ and __cmp__ on Record we'll be able to look up records that
# match name and _type with it
desired_records = {r: r for r in desired.records}
changes = []
# Find diffs & removes
for record in filter(_is_eligible, self.records):
try:
desired_record = desired_records[record]
except KeyError:
if not target.supports(record):
self.log.debug('changes: skipping record=%s %s - %s does '
'not support it', record.fqdn, record._type,
target.id)
continue
# record has been removed
self.log.debug('changes: zone=%s, removed record=%s', self,
record)
changes.append(Delete(record))
else:
change = record.changes(desired_record, target)
if change:
self.log.debug('changes: zone=%s, modified\n'
' existing=%s,\n desired=%s', self,
record, desired_record)
changes.append(change)
else:
self.log.debug('changes: zone=%s, n.c. record=%s', self,
record)
# Find additions, things that are in desired, but missing in ourselves.
# This uses set math and our special __hash__ and __cmp__ functions as
# well
for record in filter(_is_eligible, desired.records - self.records):
if not target.supports(record):
self.log.debug('changes: skipping record=%s %s - %s does not '
'support it', record.fqdn, record._type,
target.id)
continue
self.log.debug('changes: zone=%s, create record=%s', self, record)
changes.append(Create(record))
return changes
def __repr__(self):
return 'Zone<{}>'.format(self.name)
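
To make the flow above concrete, here is a sketch of computing changes between an empty "current" zone and a desired one; the throwaway target class exists only to satisfy the supports()/id interface (real providers, and tests/helpers.py, do this properly).

from octodns.record import Record
from octodns.zone import Zone


class AcceptAllTarget(object):
    # Minimal stand-in for a provider: an id and a supports() that says yes
    id = 'accept-all'

    def supports(self, record):
        return True


current = Zone('unit.tests.', [])
desired = Zone('unit.tests.', [])
desired.add_record(Record.new(desired, 'www', {
    'ttl': 300,
    'type': 'A',
    'value': '1.2.3.4',
}))

# www exists only in desired, so the plan is a single Create
for change in current.changes(desired, AcceptAllTarget()):
    print(change)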

+ 6
- 0
requirements-dev.txt View File

@ -0,0 +1,6 @@
coverage
mock
nose
pep8
pyflakes
requests_mock

+ 17
- 0
requirements.txt View File

@ -0,0 +1,17 @@
# These are known good versions. You're free to use others and things will
# likely work, but no promises are made, especially if you go older.
PyYaml==3.12
boto3==1.4.4
botocore==1.5.4
dnspython==1.15.0
docutils==0.13.1
dyn==1.7.10
futures==3.0.5
incf.countryutils==1.0
ipaddress==1.0.18
jmespath==0.9.0
python-dateutil==2.6.0
requests==2.13.0
s3transfer==0.1.10
six==1.10.0
yamllint==1.6.0

+ 35
- 0
script/bootstrap View File

@ -0,0 +1,35 @@
#!/bin/bash
# Usage: script/bootstrap
# Ensures all dependencies are installed locally.
set -e
cd "$(dirname $0)"/..
ROOT=$(pwd)
if [ -z "$VENV_NAME" ]; then
VENV_NAME="env"
fi
if [ ! -d "$VENV_NAME" ]; then
if [ -z "$VENV_PYTHON" ]; then
VENV_PYTHON=`which python`
fi
virtualenv --python=$VENV_PYTHON $VENV_NAME
fi
. "$VENV_NAME/bin/activate"
pip install -U -r requirements.txt
if [ "$ENV" != "production" ]; then
pip install -U -r requirements-dev.txt
fi
if [ ! -L ".git/hooks/pre-commit" ]; then
ln -s "$ROOT/.git_hooks_pre-commit" ".git/hooks/pre-commit"
fi
echo ""
echo "Run source env/bin/activate to get your shell into the virtualenv"
echo "See README.md for more information."
echo ""

+ 30
- 0
script/cibuild View File

@ -0,0 +1,30 @@
#!/bin/sh
set -e
cd "$(dirname "$0")/.."
echo "## bootstrap ###################################################################"
script/bootstrap
echo "## environment & versions ######################################################"
python --version
pip --version
VVER=$(virtualenv --version)
echo "virtualenv $VVER"
if [ -z "$VENV_NAME" ]; then
VENV_NAME="env"
fi
. "$VENV_NAME/bin/activate"
echo "## clean up ####################################################################"
find octodns tests -name "*.pyc" -exec rm {} \;
rm -f *.pyc
echo "## begin #######################################################################"
# Lint first, then the test suite with coverage
echo "## lint ########################################################################"
script/lint
echo "## tests/coverage ##############################################################"
script/coverage
echo "## complete ####################################################################"

+ 30
- 0
script/coverage View File

@ -0,0 +1,30 @@
#!/bin/sh
set -e
cd "$(dirname "$0")/.."
if [ -z "$VENV_NAME" ]; then
VENV_NAME="env"
fi
ACTIVATE="$VENV_NAME/bin/activate"
if [ ! -f "$ACTIVATE" ]; then
echo "$ACTIVATE does not exist, run ./script/bootstrap" >&2
exit 1
fi
. "$ACTIVATE"
# Just to be sure/safe
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
export CLOUDFLARE_EMAIL=
export CLOUDFLARE_TOKEN=
export DNSIMPLE_ACCOUNT=
export DNSIMPLE_TOKEN=
export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
coverage run --branch --source=octodns `which nosetests` --with-xunit "$@"
coverage html
coverage xml

+ 21
- 0
script/lint View File

@ -0,0 +1,21 @@
#!/bin/sh
set -e
cd "$(dirname "$0")/.."
ROOT=$(pwd)
if [ -z "$VENV_NAME" ]; then
VENV_NAME="env"
fi
ACTIVATE="$VENV_NAME/bin/activate"
if [ ! -f "$ACTIVATE" ]; then
echo "$ACTIVATE does not exist, run ./script/bootstrap" >&2
exit 1
fi
. "$ACTIVATE"
SOURCES="*.py octodns/*.py octodns/*/*.py tests/*.py"
pep8 --ignore=E221,E241,E251 $SOURCES
pyflakes $SOURCES

+ 15
- 0
script/sdist View File

@ -0,0 +1,15 @@
#!/bin/bash
set -e
if ! git diff-index --quiet HEAD --; then
echo "Changes in local directory, commit or clear"
exit 1
fi
SHA=$(git rev-parse HEAD)
python setup.py sdist
TARBALL=dist/octodns-$SHA.tar.gz
mv dist/octodns-0.*.tar.gz $TARBALL
echo "Created $TARBALL"

+ 28
- 0
script/test View File

@ -0,0 +1,28 @@
#!/bin/sh
set -e
cd "$(dirname "$0")/.."
if [ -z "$VENV_NAME" ]; then
VENV_NAME="env"
fi
ACTIVATE="$VENV_NAME/bin/activate"
if [ ! -f "$ACTIVATE" ]; then
echo "$ACTIVATE does not exist, run ./script/bootstrap" >&2
exit 1
fi
. "$ACTIVATE"
# Just to be sure/safe
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
export CLOUDFLARE_EMAIL=
export CLOUDFLARE_TOKEN=
export DNSIMPLE_ACCOUNT=
export DNSIMPLE_TOKEN=
export DYN_CUSTOMER=
export DYN_PASSWORD=
export DYN_USERNAME=
nosetests "$@"

+ 46
- 0
setup.py View File

@ -0,0 +1,46 @@
#!/usr/bin/env python
from os.path import dirname, join
import octodns
# setuptools is required here: distutils.core has no find_packages and
# doesn't support entry_points
from setuptools import find_packages, setup
cmds = (
'compare',
'dump',
'report',
'sync',
'validate'
)
cmds_dir = join(dirname(__file__), 'octodns', 'cmds')
console_scripts = {
    'octodns-{name} = octodns.cmds.{name}:main'.format(name=name)
    for name in cmds
}
setup(
author='Ross McFarland',
author_email='rwmcfa1@gmail.com',
description=octodns.__doc__,
entry_points={
'console_scripts': console_scripts,
},
install_requires=[
'PyYaml>=3.12',
'dnspython>=1.15.0',
'incf.countryutils>=1.0',
'ipaddress>=1.0.18',
'python-dateutil>=2.6.0',
'requests>=2.13.0',
'yamllint>=1.6.0'
],
license='MIT',
long_description=open('README.md').read(),
name='octodns',
packages=find_packages(),
url='https://github.com/github/octodns',
version=octodns.__VERSION__,
)

+ 4
- 0
tests/config/bad-provider-class-module.yaml View File

@ -0,0 +1,4 @@
providers:
dne:
class: octodns.provider.yaml.DoesntExistProvider
zones: {}

+ 4
- 0
tests/config/bad-provider-class-no-module.yaml View File

@ -0,0 +1,4 @@
providers:
dne:
class: DoesntExistProvider
zones: {}

+ 4
- 0
tests/config/bad-provider-class.yaml View File

@ -0,0 +1,4 @@
providers:
dne:
class: foo.bar.DoesntExistProvider
zones: {}

+ 1
- 0
tests/config/empty.yaml View File

@ -0,0 +1 @@
---

+ 3
- 0
tests/config/missing-provider-class.yaml View File

@ -0,0 +1,3 @@
providers:
yaml: {}
zones: {}

+ 4
- 0
tests/config/missing-provider-config.yaml View File

@ -0,0 +1,4 @@
providers:
yaml:
class: octodns.provider.yaml.YamlProvider
zones: {}

+ 6
- 0
tests/config/missing-provider-env.yaml View File

@ -0,0 +1,6 @@
providers:
yaml:
class: octodns.provider.yaml.YamlProvider
int: 42
directory: env/DOES_NOT_EXIST
zones: {}

+ 3
- 0
tests/config/missing-sources.yaml View File

@ -0,0 +1,3 @@
providers: {}
zones:
missing.sources.: {}

+ 13
- 0
tests/config/no-dump.yaml View File

@ -0,0 +1,13 @@
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
out:
class: octodns.provider.yaml.YamlProvider
directory: /tmp/foo
zones:
unit.tests.:
sources:
- in
targets:
- out

+ 13
- 0
tests/config/simple-validate.yaml View File

@ -0,0 +1,13 @@
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
simple:
class: helpers.SimpleProvider
zones:
unit.tests.:
sources:
- in
- simple
targets:
- dump

+ 35
- 0
tests/config/simple.yaml View File

@ -0,0 +1,35 @@
providers:
in:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
# This is sort of ugly, but it shouldn't hurt anything. It'll just write out
# the target file twice where it and dump are both used
dump2:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
simple:
class: helpers.SimpleProvider
geo:
class: helpers.GeoProvider
nosshfp:
class: helpers.NoSshFpProvider
zones:
unit.tests.:
sources:
- in
targets:
- dump
subzone.unit.tests.:
sources:
- in
targets:
- dump
- dump2
empty.:
sources:
- in
targets:
- dump

+ 10
- 0
tests/config/subzone.unit.tests.yaml View File

@ -0,0 +1,10 @@
---
2:
type: A
value: 2.4.4.4
12:
type: A
value: 12.4.4.4
test:
type: A
value: 4.4.4.4

+ 108
- 0
tests/config/unit.tests.yaml View File

@ -0,0 +1,108 @@
---
? ''
: - geo:
AF:
- 2.2.3.4
- 2.2.3.5
AS-JP:
- 3.2.3.4
- 3.2.3.5
NA-US:
- 4.2.3.4
- 4.2.3.5
NA-US-CA:
- 5.2.3.4
- 5.2.3.5
ttl: 300
type: A
values:
- 1.2.3.4
- 1.2.3.5
- ttl: 3600
type: SSHFP
values:
- algorithm: 1
fingerprint: bf6b6825d2977c511a475bbefb88aad54a92ac73
fingerprint_type: 1
- algorithm: 1
fingerprint: 7491973e5f8b39d5327cd4e08bc81b05f7710b49
fingerprint_type: 1
- type: NS
values:
- 6.2.3.4.
- 7.2.3.4.
_srv._tcp:
ttl: 600
type: SRV
values:
- port: 30
priority: 12
target: foo-2.unit.tests.
weight: 20
- port: 30
priority: 10
target: foo-1.unit.tests.
weight: 20
aaaa:
ttl: 600
type: AAAA
value: 2601:644:500:e210:62f8:1dff:feb8:947a
cname:
ttl: 300
type: CNAME
value: unit.tests.
mx:
ttl: 300
type: MX
values:
- priority: 40
value: smtp-1.unit.tests.
- priority: 20
value: smtp-2.unit.tests.
- priority: 30
value: smtp-3.unit.tests.
- priority: 10
value: smtp-4.unit.tests.
naptr:
ttl: 600
type: NAPTR
values:
- flags: U
order: 100
preference: 100
regexp: '!^.*$!sip:info@bar.example.com!'
replacement: .
service: SIP+D2U
- flags: S
order: 10
preference: 100
regexp: '!^.*$!sip:info@bar.example.com!'
replacement: .
service: SIP+D2U
ptr:
ttl: 300
type: PTR
value: foo.bar.com.
spf:
ttl: 600
type: SPF
value: v=spf1 ip4:192.168.0.1/16-all
sub:
type: 'NS'
values:
- 6.2.3.4.
- 7.2.3.4.
txt:
ttl: 600
type: TXT
values:
- Bah bah black sheep
- have you any wool.
www:
ttl: 300
type: A
value: 2.2.3.6
www.sub:
ttl: 300
type: A
value: 2.2.3.6

+ 28
- 0
tests/config/unknown-provider.yaml View File

@ -0,0 +1,28 @@
providers:
yaml:
class: octodns.provider.yaml.YamlProvider
directory: ./config
simple_source:
class: helpers.SimpleSource
zones:
missing.sources.:
targets:
- yaml
missing.targets.:
sources:
- yaml
unknown.source.:
sources:
- not-there
targets:
- yaml
unknown.target.:
sources:
- yaml
targets:
- not-there-either
not.targetable.:
sources:
- yaml
targets:
- simple_source

+ 8
- 0
tests/config/unordered.yaml View File

@ -0,0 +1,8 @@
---
abc:
type: A
value: 9.9.9.9
xyz:
# t comes before v
value: 9.9.9.9
type: A

+ 188
- 0
tests/fixtures/cloudflare-dns_records-page-1.json View File

@ -0,0 +1,188 @@
{
"result": [
{
"id": "fc12ab34cd5611334422ab3322997650",
"type": "A",
"name": "unit.tests",
"content": "1.2.3.4",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.054409Z",
"created_on": "2017-03-11T18:01:43.054409Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997651",
"type": "A",
"name": "unit.tests",
"content": "1.2.3.5",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.160148Z",
"created_on": "2017-03-11T18:01:43.160148Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997653",
"type": "A",
"name": "www.unit.tests",
"content": "2.2.3.6",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.420689Z",
"created_on": "2017-03-11T18:01:43.420689Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997654",
"type": "A",
"name": "www.sub.unit.tests",
"content": "2.2.3.6",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:44.030044Z",
"created_on": "2017-03-11T18:01:44.030044Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997655",
"type": "AAAA",
"name": "aaaa.unit.tests",
"content": "2601:644:500:e210:62f8:1dff:feb8:947a",
"proxiable": true,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.843594Z",
"created_on": "2017-03-11T18:01:43.843594Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997656",
"type": "CNAME",
"name": "cname.unit.tests",
"content": "unit.tests",
"proxiable": true,
"proxied": false,
"ttl": 300,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.940682Z",
"created_on": "2017-03-11T18:01:43.940682Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997657",
"type": "MX",
"name": "mx.unit.tests",
"content": "smtp-1.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 300,
"priority": 40,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.764273Z",
"created_on": "2017-03-11T18:01:43.764273Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997658",
"type": "MX",
"name": "mx.unit.tests",
"content": "smtp-2.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 300,
"priority": 20,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.586007Z",
"created_on": "2017-03-11T18:01:43.586007Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997659",
"type": "MX",
"name": "mx.unit.tests",
"content": "smtp-3.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 300,
"priority": 30,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.670592Z",
"created_on": "2017-03-11T18:01:43.670592Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997660",
"type": "MX",
"name": "mx.unit.tests",
"content": "smtp-4.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 300,
"priority": 10,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.505671Z",
"created_on": "2017-03-11T18:01:43.505671Z",
"meta": {
"auto_added": false
}
}
],
"result_info": {
"page": 1,
"per_page": 10,
"total_pages": 2,
"count": 10,
"total_count": 16
},
"success": true,
"errors": [],
"messages": []
}

+ 116
- 0
tests/fixtures/cloudflare-dns_records-page-2.json View File

@ -0,0 +1,116 @@
{
"result": [
{
"id": "fc12ab34cd5611334422ab3322997661",
"type": "NS",
"name": "under.unit.tests",
"content": "ns1.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 3600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:42.599878Z",
"created_on": "2017-03-11T18:01:42.599878Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997662",
"type": "NS",
"name": "under.unit.tests",
"content": "ns2.unit.tests",
"proxiable": false,
"proxied": false,
"ttl": 3600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:42.727011Z",
"created_on": "2017-03-11T18:01:42.727011Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997663",
"type": "SPF",
"name": "spf.unit.tests",
"content": "v=spf1 ip4:192.168.0.1/16-all",
"proxiable": false,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:44.112568Z",
"created_on": "2017-03-11T18:01:44.112568Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997664",
"type": "TXT",
"name": "txt.unit.tests",
"content": "Bah bah black sheep",
"proxiable": false,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:42.837282Z",
"created_on": "2017-03-11T18:01:42.837282Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997665",
"type": "TXT",
"name": "txt.unit.tests",
"content": "have you any wool.",
"proxiable": false,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:42.961566Z",
"created_on": "2017-03-11T18:01:42.961566Z",
"meta": {
"auto_added": false
}
},
{
"id": "fc12ab34cd5611334422ab3322997666",
"type": "SOA",
"name": "unit.tests",
"content": "ignored",
"proxiable": false,
"proxied": false,
"ttl": 600,
"locked": false,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:42.961566Z",
"created_on": "2017-03-11T18:01:42.961566Z",
"meta": {
"auto_added": false
}
}
],
"result_info": {
"page": 2,
"per_page": 10,
"total_pages": 2,
"count": 6,
"total_count": 16
},
"success": true,
"errors": [],
"messages": []
}

+ 140
- 0
tests/fixtures/cloudflare-zones-page-1.json View File

@ -0,0 +1,140 @@
{
"result": [
{
"id": "234234243423aaabb334342aaa343433",
"name": "github.com",
"status": "pending",
"paused": false,
"type": "full",
"development_mode": 0,
"name_servers": [
"alice.ns.cloudflare.com",
"tom.ns.cloudflare.com"
],
"original_name_servers": [],
"original_registrar": null,
"original_dnshost": null,
"modified_on": "2017-02-20T03:57:03.753292Z",
"created_on": "2017-02-20T03:53:59.274170Z",
"meta": {
"step": 4,
"wildcard_proxiable": false,
"custom_certificate_quota": 0,
"page_rule_quota": 3,
"phishing_detected": false,
"multiple_railguns_allowed": false
},
"owner": {
"type": "user",
"id": "334234243423aaabb334342aaa343433",
"email": "noreply@github.com"
},
"permissions": [
"#analytics:read",
"#billing:edit",
"#billing:read",
"#cache_purge:edit",
"#dns_records:edit",
"#dns_records:read",
"#lb:edit",
"#lb:read",
"#logs:read",
"#organization:edit",
"#organization:read",
"#ssl:edit",
"#ssl:read",
"#waf:edit",
"#waf:read",
"#zone:edit",
"#zone:read",
"#zone_settings:edit",
"#zone_settings:read"
],
"plan": {
"id": "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"name": "Free Website",
"price": 0,
"currency": "USD",
"frequency": "",
"is_subscribed": true,
"can_subscribe": false,
"legacy_id": "free",
"legacy_discount": false,
"externally_managed": false
}
},
{
"id": "234234243423aaabb334342aaa343434",
"name": "github.io",
"status": "pending",
"paused": false,
"type": "full",
"development_mode": 0,
"name_servers": [
"alice.ns.cloudflare.com",
"tom.ns.cloudflare.com"
],
"original_name_servers": [],
"original_registrar": null,
"original_dnshost": null,
"modified_on": "2017-02-20T04:12:00.732827Z",
"created_on": "2017-02-20T04:11:58.250696Z",
"meta": {
"step": 4,
"wildcard_proxiable": false,
"custom_certificate_quota": 0,
"page_rule_quota": 3,
"phishing_detected": false,
"multiple_railguns_allowed": false
},
"owner": {
"type": "user",
"id": "334234243423aaabb334342aaa343433",
"email": "noreply@github.com"
},
"permissions": [
"#analytics:read",
"#billing:edit",
"#billing:read",
"#cache_purge:edit",
"#dns_records:edit",
"#dns_records:read",
"#lb:edit",
"#lb:read",
"#logs:read",
"#organization:edit",
"#organization:read",
"#ssl:edit",
"#ssl:read",
"#waf:edit",
"#waf:read",
"#zone:edit",
"#zone:read",
"#zone_settings:edit",
"#zone_settings:read"
],
"plan": {
"id": "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"name": "Free Website",
"price": 0,
"currency": "USD",
"frequency": "",
"is_subscribed": true,
"can_subscribe": false,
"legacy_id": "free",
"legacy_discount": false,
"externally_managed": false
}
}
],
"result_info": {
"page": 1,
"per_page": 2,
"total_pages": 2,
"count": 2,
"total_count": 4
},
"success": true,
"errors": [],
"messages": []
}

+ 140
- 0
tests/fixtures/cloudflare-zones-page-2.json View File

@ -0,0 +1,140 @@
{
"result": [
{
"id": "234234243423aaabb334342aaa343434",
"name": "githubusercontent.com",
"status": "pending",
"paused": false,
"type": "full",
"development_mode": 0,
"name_servers": [
"alice.ns.cloudflare.com",
"tom.ns.cloudflare.com"
],
"original_name_servers": [],
"original_registrar": null,
"original_dnshost": null,
"modified_on": "2017-02-20T04:06:46.019706Z",
"created_on": "2017-02-20T04:05:51.683040Z",
"meta": {
"step": 4,
"wildcard_proxiable": false,
"custom_certificate_quota": 0,
"page_rule_quota": 3,
"phishing_detected": false,
"multiple_railguns_allowed": false
},
"owner": {
"type": "user",
"id": "334234243423aaabb334342aaa343433",
"email": "noreply@github.com"
},
"permissions": [
"#analytics:read",
"#billing:edit",
"#billing:read",
"#cache_purge:edit",
"#dns_records:edit",
"#dns_records:read",
"#lb:edit",
"#lb:read",
"#logs:read",
"#organization:edit",
"#organization:read",
"#ssl:edit",
"#ssl:read",
"#waf:edit",
"#waf:read",
"#zone:edit",
"#zone:read",
"#zone_settings:edit",
"#zone_settings:read"
],
"plan": {
"id": "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"name": "Free Website",
"price": 0,
"currency": "USD",
"frequency": "",
"is_subscribed": true,
"can_subscribe": false,
"legacy_id": "free",
"legacy_discount": false,
"externally_managed": false
}
},
{
"id": "234234243423aaabb334342aaa343435",
"name": "unit.tests",
"status": "pending",
"paused": false,
"type": "full",
"development_mode": 0,
"name_servers": [
"alice.ns.cloudflare.com",
"tom.ns.cloudflare.com"
],
"original_name_servers": [],
"original_registrar": null,
"original_dnshost": null,
"modified_on": "2017-02-20T04:10:23.687329Z",
"created_on": "2017-02-20T04:10:18.294562Z",
"meta": {
"step": 4,
"wildcard_proxiable": false,
"custom_certificate_quota": 0,
"page_rule_quota": 3,
"phishing_detected": false,
"multiple_railguns_allowed": false
},
"owner": {
"type": "user",
"id": "334234243423aaabb334342aaa343433",
"email": "noreply@github.com"
},
"permissions": [
"#analytics:read",
"#billing:edit",
"#billing:read",
"#cache_purge:edit",
"#dns_records:edit",
"#dns_records:read",
"#lb:edit",
"#lb:read",
"#logs:read",
"#organization:edit",
"#organization:read",
"#ssl:edit",
"#ssl:read",
"#waf:edit",
"#waf:read",
"#zone:edit",
"#zone:read",
"#zone_settings:edit",
"#zone_settings:read"
],
"plan": {
"id": "0feeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"name": "Free Website",
"price": 0,
"currency": "USD",
"frequency": "",
"is_subscribed": true,
"can_subscribe": false,
"legacy_id": "free",
"legacy_discount": false,
"externally_managed": false
}
}
],
"result_info": {
"page": 2,
"per_page": 2,
"total_pages": 2,
"count": 2,
"total_count": 4
},
"success": true,
"errors": [],
"messages": []
}

+ 106
- 0
tests/fixtures/dnsimple-invalid-content.json View File

@ -0,0 +1,106 @@
{
"data": [
{
"id": 11189898,
"zone_id": "unit.tests",
"parent_id": null,
"name": "naptr",
"content": "",
"ttl": 600,
"priority": null,
"type": "NAPTR",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:11Z",
"updated_at": "2017-03-09T15:55:11Z"
},
{
"id": 11189899,
"zone_id": "unit.tests",
"parent_id": null,
"name": "naptr",
"content": "100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
"ttl": 600,
"priority": null,
"type": "NAPTR",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:11Z",
"updated_at": "2017-03-09T15:55:11Z"
},
{
"id": 11189878,
"zone_id": "unit.tests",
"parent_id": null,
"name": "_srv._tcp",
"content": "",
"ttl": 600,
"priority": 10,
"type": "SRV",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189879,
"zone_id": "unit.tests",
"parent_id": null,
"name": "_srv._tcp",
"content": "20 foo-2.unit.tests",
"ttl": 600,
"priority": 12,
"type": "SRV",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189882,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "",
"ttl": 3600,
"priority": null,
"type": "SSHFP",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189883,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "1 1",
"ttl": 3600,
"priority": null,
"type": "SSHFP",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
}
],
"pagination": {
"current_page": 1,
"per_page": 20,
"total_entries": 6,
"total_pages": 1
}
}

+ 314
- 0
tests/fixtures/dnsimple-page-1.json View File

@ -0,0 +1,314 @@
{
"data": [
{
"id": 11189873,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "ns1.dnsimple.com admin.dnsimple.com 1489074932 86400 7200 604800 300",
"ttl": 3600,
"priority": null,
"type": "SOA",
"regions": [
"global"
],
"system_record": true,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:56:21Z"
},
{
"id": 11189874,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "ns1.dnsimple.com",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": true,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189875,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "ns2.dnsimple.com",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": true,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189876,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "ns3.dnsimple.com",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": true,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189877,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "ns4.dnsimple.com",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": true,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189878,
"zone_id": "unit.tests",
"parent_id": null,
"name": "_srv._tcp",
"content": "20 30 foo-1.unit.tests",
"ttl": 600,
"priority": 10,
"type": "SRV",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189879,
"zone_id": "unit.tests",
"parent_id": null,
"name": "_srv._tcp",
"content": "20 30 foo-2.unit.tests",
"ttl": 600,
"priority": 12,
"type": "SRV",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189880,
"zone_id": "unit.tests",
"parent_id": null,
"name": "under",
"content": "ns1.unit.tests.",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189881,
"zone_id": "unit.tests",
"parent_id": null,
"name": "under",
"content": "ns2.unit.tests.",
"ttl": 3600,
"priority": null,
"type": "NS",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189882,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
"ttl": 3600,
"priority": null,
"type": "SSHFP",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:08Z",
"updated_at": "2017-03-09T15:55:08Z"
},
{
"id": 11189883,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73",
"ttl": 3600,
"priority": null,
"type": "SSHFP",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189884,
"zone_id": "unit.tests",
"parent_id": null,
"name": "txt",
"content": "Bah bah black sheep",
"ttl": 600,
"priority": null,
"type": "TXT",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189885,
"zone_id": "unit.tests",
"parent_id": null,
"name": "txt",
"content": "have you any wool.",
"ttl": 600,
"priority": null,
"type": "TXT",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189886,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "1.2.3.4",
"ttl": 300,
"priority": null,
"type": "A",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189887,
"zone_id": "unit.tests",
"parent_id": null,
"name": "",
"content": "1.2.3.5",
"ttl": 300,
"priority": null,
"type": "A",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189889,
"zone_id": "unit.tests",
"parent_id": null,
"name": "www",
"content": "2.2.3.6",
"ttl": 300,
"priority": null,
"type": "A",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:09Z",
"updated_at": "2017-03-09T15:55:09Z"
},
{
"id": 11189890,
"zone_id": "unit.tests",
"parent_id": null,
"name": "mx",
"content": "smtp-4.unit.tests",
"ttl": 300,
"priority": 10,
"type": "MX",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189891,
"zone_id": "unit.tests",
"parent_id": null,
"name": "mx",
"content": "smtp-2.unit.tests",
"ttl": 300,
"priority": 20,
"type": "MX",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189892,
"zone_id": "unit.tests",
"parent_id": null,
"name": "mx",
"content": "smtp-3.unit.tests",
"ttl": 300,
"priority": 30,
"type": "MX",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
}
],
"pagination": {
"current_page": 1,
"per_page": 20,
"total_entries": 27,
"total_pages": 2
}
}

+ 138
- 0
tests/fixtures/dnsimple-page-2.json View File

@ -0,0 +1,138 @@
{
"data": [
{
"id": 11189893,
"zone_id": "unit.tests",
"parent_id": null,
"name": "mx",
"content": "smtp-1.unit.tests",
"ttl": 300,
"priority": 40,
"type": "MX",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189894,
"zone_id": "unit.tests",
"parent_id": null,
"name": "aaaa",
"content": "2601:644:500:e210:62f8:1dff:feb8:947a",
"ttl": 600,
"priority": null,
"type": "AAAA",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189895,
"zone_id": "unit.tests",
"parent_id": null,
"name": "cname",
"content": "unit.tests",
"ttl": 300,
"priority": null,
"type": "CNAME",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189896,
"zone_id": "unit.tests",
"parent_id": null,
"name": "ptr",
"content": "foo.bar.com.",
"ttl": 300,
"priority": null,
"type": "PTR",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189897,
"zone_id": "unit.tests",
"parent_id": null,
"name": "www.sub",
"content": "2.2.3.6",
"ttl": 300,
"priority": null,
"type": "A",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:10Z",
"updated_at": "2017-03-09T15:55:10Z"
},
{
"id": 11189898,
"zone_id": "unit.tests",
"parent_id": null,
"name": "naptr",
"content": "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
"ttl": 600,
"priority": null,
"type": "NAPTR",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:11Z",
"updated_at": "2017-03-09T15:55:11Z"
},
{
"id": 11189899,
"zone_id": "unit.tests",
"parent_id": null,
"name": "naptr",
"content": "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
"ttl": 600,
"priority": null,
"type": "NAPTR",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:11Z",
"updated_at": "2017-03-09T15:55:11Z"
},
{
"id": 11189900,
"zone_id": "unit.tests",
"parent_id": null,
"name": "spf",
"content": "v=spf1 ip4:192.168.0.1/16-all",
"ttl": 600,
"priority": null,
"type": "SPF",
"regions": [
"global"
],
"system_record": false,
"created_at": "2017-03-09T15:55:11Z",
"updated_at": "2017-03-09T15:55:11Z"
}
],
"pagination": {
"current_page": 2,
"per_page": 20,
"total_entries": 27,
"total_pages": 2
}
}

+ 4190
- 0
tests/fixtures/dyn-traffic-director-get.json
File diff suppressed because it is too large
View File


+ 235
- 0
tests/fixtures/powerdns-full-data.json View File

@ -0,0 +1,235 @@
{
"account": "",
"dnssec": false,
"id": "unit.tests.",
"kind": "Master",
"last_check": 0,
"masters": [],
"name": "unit.tests.",
"notified_serial": 2017012803,
"rrsets": [
{
"comments": [],
"name": "mx.unit.tests.",
"records": [
{
"content": "40 smtp-1.unit.tests.",
"disabled": false
},
{
"content": "20 smtp-2.unit.tests.",
"disabled": false
},
{
"content": "30 smtp-3.unit.tests.",
"disabled": false
},
{
"content": "10 smtp-4.unit.tests.",
"disabled": false
}
],
"ttl": 300,
"type": "MX"
},
{
"comments": [],
"name": "sub.unit.tests.",
"records": [
{
"content": "6.2.3.4.",
"disabled": false
}, {
"content": "7.2.3.4.",
"disabled": false
}
],
"ttl": 3600,
"type": "NS"
},
{
"comments": [],
"name": "www.unit.tests.",
"records": [
{
"content": "2.2.3.6",
"disabled": false
}
],
"ttl": 300,
"type": "A"
},
{
"comments": [],
"name": "_srv._tcp.unit.tests.",
"records": [
{
"content": "10 20 30 foo-1.unit.tests.",
"disabled": false
},
{
"content": "12 20 30 foo-2.unit.tests.",
"disabled": false
}
],
"ttl": 600,
"type": "SRV"
},
{
"comments": [],
"name": "txt.unit.tests.",
"records": [
{
"content": "\"Bah bah black sheep\"",
"disabled": false
},
{
"content": "\"have you any wool.\"",
"disabled": false
}
],
"ttl": 600,
"type": "TXT"
},
{
"comments": [],
"name": "naptr.unit.tests.",
"records": [
{
"content": "10 100 \"S\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
"disabled": false
},
{
"content": "100 100 \"U\" \"SIP+D2U\" \"!^.*$!sip:info@bar.example.com!\" .",
"disabled": false
}
],
"ttl": 600,
"type": "NAPTR"
},
{
"comments": [],
"name": "ptr.unit.tests.",
"records": [
{
"content": "foo.bar.com.",
"disabled": false
}
],
"ttl": 300,
"type": "PTR"
},
{
"comments": [],
"name": "spf.unit.tests.",
"records": [
{
"content": "\"v=spf1 ip4:192.168.0.1/16-all\"",
"disabled": false
}
],
"ttl": 600,
"type": "SPF"
},
{
"comments": [],
"name": "cname.unit.tests.",
"records": [
{
"content": "unit.tests.",
"disabled": false
}
],
"ttl": 300,
"type": "CNAME"
},
{
"comments": [],
"name": "www.sub.unit.tests.",
"records": [
{
"content": "2.2.3.6",
"disabled": false
}
],
"ttl": 300,
"type": "A"
},
{
"comments": [],
"name": "aaaa.unit.tests.",
"records": [
{
"content": "2601:644:500:e210:62f8:1dff:feb8:947a",
"disabled": false
}
],
"ttl": 600,
"type": "AAAA"
},
{
"comments": [],
"name": "unit.tests.",
"records": [
{
"content": "1 1 7491973e5f8b39d5327cd4e08bc81b05f7710b49",
"disabled": false
},
{
"content": "1 1 bf6b6825d2977c511a475bbefb88aad54a92ac73",
"disabled": false
}
],
"ttl": 3600,
"type": "SSHFP"
},
{
"comments": [],
"name": "unit.tests.",
"records": [
{
"content": "ns1.ext.unit.tests. hostmaster.unit.tests. 2017012803 3600 600 604800 60",
"disabled": false
}
],
"ttl": 3600,
"type": "SOA"
},
{
"comments": [],
"name": "unit.tests.",
"records": [
{
"content": "1.1.1.1.",
"disabled": false
},
{
"content": "4.4.4.4.",
"disabled": false
}
],
"ttl": 600,
"type": "NS"
},
{
"comments": [],
"name": "unit.tests.",
"records": [
{
"content": "1.2.3.5",
"disabled": false
},
{
"content": "1.2.3.4",
"disabled": false
}
],
"ttl": 300,
"type": "A"
}
],
"serial": 2017012803,
"soa_edit": "",
"soa_edit_api": "INCEPTION-INCREMENT",
"url": "api/v1/servers/localhost/zones/unit.tests."
}

+ 69
- 0
tests/helpers.py View File

@ -0,0 +1,69 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from shutil import rmtree
from tempfile import mkdtemp
class SimpleSource(object):
def __init__(self, id='test'):
pass
class SimpleProvider(object):
SUPPORTS_GEO = False
def __init__(self, id='test'):
pass
def populate(self, zone, source=True):
pass
def supports(self, record):
return True
def __repr__(self):
return self.__class__.__name__
class GeoProvider(object):
SUPPORTS_GEO = True
def __init__(self, id='test'):
pass
def populate(self, zone, source=True):
pass
def supports(self, record):
return True
def __repr__(self):
return self.__class__.__name__
class NoSshFpProvider(SimpleProvider):
def supports(self, record):
return record._type != 'SSHFP'
class TemporaryDirectory(object):
def __init__(self, delete_on_exit=True):
self.delete_on_exit = delete_on_exit
def __enter__(self):
self.dirname = mkdtemp()
return self
def __exit__(self, *args, **kwargs):
if self.delete_on_exit:
rmtree(self.dirname)
else:
raise Exception(self.dirname)

+ 203
- 0
tests/test_octodns_manager.py View File

@ -0,0 +1,203 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from os import environ
from os.path import dirname, join
from unittest import TestCase
from octodns.record import Record
from octodns.manager import _AggregateTarget, Manager
from octodns.zone import Zone
from helpers import GeoProvider, NoSshFpProvider, SimpleProvider, \
TemporaryDirectory
config_dir = join(dirname(__file__), 'config')
def get_config_filename(which):
return join(config_dir, which)
class TestManager(TestCase):
def test_missing_provider_class(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('missing-provider-class.yaml')).sync()
self.assertTrue('missing class' in ctx.exception.message)
def test_bad_provider_class(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('bad-provider-class.yaml')).sync()
self.assertTrue('Unknown provider class' in ctx.exception.message)
def test_bad_provider_class_module(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('bad-provider-class-module.yaml')) \
.sync()
self.assertTrue('Unknown provider class' in ctx.exception.message)
def test_bad_provider_class_no_module(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('bad-provider-class-no-module.yaml')) \
.sync()
self.assertTrue('Unknown provider class' in ctx.exception.message)
def test_missing_provider_config(self):
# Missing provider config
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('missing-provider-config.yaml')).sync()
self.assertTrue('provider config' in ctx.exception.message)
def test_missing_env_config(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('missing-provider-env.yaml')).sync()
self.assertTrue('missing env var' in ctx.exception.message)
def test_missing_source(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.sync(['missing.sources.'])
self.assertTrue('missing sources' in ctx.exception.message)
def test_missing_targets(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.sync(['missing.targets.'])
self.assertTrue('missing targets' in ctx.exception.message)
def test_unknown_source(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.sync(['unknown.source.'])
self.assertTrue('unknown source' in ctx.exception.message)
def test_unknown_target(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.sync(['unknown.target.'])
self.assertTrue('unknown target' in ctx.exception.message)
def test_source_only_as_a_target(self):
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.sync(['not.targetable.'])
self.assertTrue('does not support targeting' in ctx.exception.message)
def test_simple(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False)
self.assertEquals(19, tc)
# try with just one of the zones
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, eligible_zones=['unit.tests.'])
self.assertEquals(13, tc)
# the subzone, with 2 targets
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, eligible_zones=['subzone.unit.tests.'])
self.assertEquals(6, tc)
# and finally the empty zone
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, eligible_zones=['empty.'])
self.assertEquals(0, tc)
# Again with force
tc = Manager(get_config_filename('simple.yaml')) \
.sync(dry_run=False, force=True)
self.assertEquals(19, tc)
def test_eligible_targets(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
# Only allow a target that doesn't exist
tc = Manager(get_config_filename('simple.yaml')) \
.sync(eligible_targets=['foo'])
self.assertEquals(0, tc)
def test_compare(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
manager = Manager(get_config_filename('simple.yaml'))
changes = manager.compare(['in'], ['in'], 'unit.tests.')
self.assertEquals([], changes)
# Create an empty unit.test zone config
with open(join(tmpdir.dirname, 'unit.tests.yaml'), 'w') as fh:
fh.write('---\n{}')
changes = manager.compare(['in'], ['dump'], 'unit.tests.')
self.assertEquals(13, len(changes))
# Compound sources with varying support
changes = manager.compare(['in', 'nosshfp'],
['dump'],
'unit.tests.')
self.assertEquals(12, len(changes))
with self.assertRaises(Exception) as ctx:
manager.compare(['nope'], ['dump'], 'unit.tests.')
self.assertEquals('Unknown source: nope', ctx.exception.message)
def test_aggregate_target(self):
simple = SimpleProvider()
geo = GeoProvider()
nosshfp = NoSshFpProvider()
self.assertFalse(_AggregateTarget([simple, simple]).SUPPORTS_GEO)
self.assertFalse(_AggregateTarget([simple, geo]).SUPPORTS_GEO)
self.assertFalse(_AggregateTarget([geo, simple]).SUPPORTS_GEO)
self.assertTrue(_AggregateTarget([geo, geo]).SUPPORTS_GEO)
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'sshfp', {
'ttl': 60,
'type': 'SSHFP',
'value': {
'algorithm': 1,
'fingerprint_type': 1,
'fingerprint': 'abcdefg',
},
})
self.assertTrue(simple.supports(record))
self.assertFalse(nosshfp.supports(record))
self.assertTrue(_AggregateTarget([simple, simple]).supports(record))
self.assertFalse(_AggregateTarget([simple, nosshfp]).supports(record))
def test_dump(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
manager = Manager(get_config_filename('simple.yaml'))
with self.assertRaises(Exception) as ctx:
manager.dump('unit.tests.', tmpdir.dirname, 'nope')
self.assertEquals('Unknown source: nope', ctx.exception.message)
manager.dump('unit.tests.', tmpdir.dirname, 'in')
# make sure this fails with an IOError and not a KeyError when
            # trying to find sub zones
with self.assertRaises(IOError):
manager.dump('unknown.zone.', tmpdir.dirname, 'in')
def test_validate_configs(self):
Manager(get_config_filename('simple-validate.yaml')).validate_configs()
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('missing-sources.yaml')) \
.validate_configs()
self.assertTrue('missing sources' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
Manager(get_config_filename('unknown-provider.yaml')) \
.validate_configs()
self.assertTrue('unknown source' in ctx.exception.message)

+ 170
- 0
tests/test_octodns_provider_base.py View File

@ -0,0 +1,170 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from logging import getLogger
from unittest import TestCase
from octodns.record import Create, Delete, Record, Update
from octodns.provider.base import BaseProvider, Plan, UnsafePlan
from octodns.zone import Zone
class HelperProvider(BaseProvider):
log = getLogger('HelperProvider')
def __init__(self, extra_changes, apply_disabled=False,
include_change_callback=None):
self.__extra_changes = extra_changes
self.apply_disabled = apply_disabled
self.include_change_callback = include_change_callback
def populate(self, zone, target=False):
pass
def _include_change(self, change):
return not self.include_change_callback or \
self.include_change_callback(change)
def _extra_changes(self, existing, changes):
return self.__extra_changes
def _apply(self, plan):
pass
class TestBaseProvider(TestCase):
def test_base_provider(self):
with self.assertRaises(NotImplementedError) as ctx:
BaseProvider('base')
self.assertEquals('Abstract base class, log property missing',
ctx.exception.message)
class HasLog(BaseProvider):
log = getLogger('HasLog')
with self.assertRaises(NotImplementedError) as ctx:
HasLog('haslog')
self.assertEquals('Abstract base class, SUPPORTS_GEO property missing',
ctx.exception.message)
class HasSupportsGeo(HasLog):
SUPPORTS_GEO = False
zone = Zone('unit.tests.', [])
with self.assertRaises(NotImplementedError) as ctx:
HasSupportsGeo('hassupportesgeo').populate(zone)
self.assertEquals('Abstract base class, populate method missing',
ctx.exception.message)
class HasPopulate(HasSupportsGeo):
def populate(self, zone, target=False):
zone.add_record(Record.new(zone, '', {
'ttl': 60,
'type': 'A',
'value': '2.3.4.5'
}))
zone.add_record(Record.new(zone, 'going', {
'ttl': 60,
'type': 'A',
'value': '3.4.5.6'
}))
zone.add_record(Record.new(zone, '', {
'ttl': 60,
'type': 'A',
'value': '1.2.3.4'
}))
self.assertTrue(HasSupportsGeo('hassupportesgeo')
.supports(list(zone.records)[0]))
plan = HasPopulate('haspopulate').plan(zone)
self.assertEquals(2, len(plan.changes))
with self.assertRaises(NotImplementedError) as ctx:
HasPopulate('haspopulate').apply(plan)
self.assertEquals('Abstract base class, _apply method missing',
ctx.exception.message)
def test_plan(self):
ignored = Zone('unit.tests.', [])
# No change, thus no plan
provider = HelperProvider([])
self.assertEquals(None, provider.plan(ignored))
record = Record.new(ignored, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
provider = HelperProvider([Create(record)])
plan = provider.plan(ignored)
self.assertTrue(plan)
self.assertEquals(1, len(plan.changes))
def test_apply(self):
ignored = Zone('unit.tests.', [])
record = Record.new(ignored, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
provider = HelperProvider([Create(record)], apply_disabled=True)
plan = provider.plan(ignored)
provider.apply(plan)
provider.apply_disabled = False
self.assertEquals(1, provider.apply(plan))
def test_include_change(self):
zone = Zone('unit.tests.', [])
record = Record.new(zone, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
zone.add_record(record)
provider = HelperProvider([], include_change_callback=lambda c: False)
plan = provider.plan(zone)
# We filtered out the only change
self.assertFalse(plan)
def test_safe(self):
ignored = Zone('unit.tests.', [])
# No changes is safe
Plan(None, None, []).raise_if_unsafe()
# Creates are safe
record = Record.new(ignored, 'a', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
Plan(None, None, [Create(record) for i in range(10)]).raise_if_unsafe()
# max Updates is safe
changes = [Update(record, record)
for i in range(Plan.MAX_SAFE_UPDATES)]
Plan(None, None, changes).raise_if_unsafe()
# but max + 1 isn't
with self.assertRaises(UnsafePlan):
changes.append(Update(record, record))
Plan(None, None, changes).raise_if_unsafe()
# max Deletes is safe
changes = [Delete(record) for i in range(Plan.MAX_SAFE_DELETES)]
Plan(None, None, changes).raise_if_unsafe()
# but max + 1 isn't
with self.assertRaises(UnsafePlan):
changes.append(Delete(record))
Plan(None, None, changes).raise_if_unsafe()
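Read together, the NotImplementedError checks in test_base_provider spell out the contract a concrete provider has to meet: a log attribute, a SUPPORTS_GEO flag, a populate method and an _apply method; plan(), apply() and the Plan safety limits tested above are supplied by the base module. A minimal sketch of a provider that satisfies that contract (the class name and behaviour are illustrative, not part of this import):

from logging import getLogger

from octodns.provider.base import BaseProvider


class NullProvider(BaseProvider):
    # Illustrative only: pretends to hold no records and applies nothing.
    log = getLogger('NullProvider')
    SUPPORTS_GEO = False

    def populate(self, zone, target=False):
        # A real provider would add the records it already holds to `zone`
        # here; adding nothing means "the target is currently empty".
        pass

    def _apply(self, plan):
        # plan.changes holds the Create/Update/Delete objects computed by
        # plan(); a real provider would push each one to its backend.
        for change in plan.changes:
            self.log.debug('_apply: %s', change)

With only that in place, NullProvider('null').plan(desired_zone) yields a Plan of Creates for whatever the desired zone holds, the same way the helper classes above do.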

+ 273
- 0
tests/test_octodns_provider_cloudflare.py

@@ -0,0 +1,273 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.cloudflare import CloudflareProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestCloudflareProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
# Our test suite differs a bit, add our NS and remove the simple one
expected.add_record(Record.new(expected, 'under', {
'ttl': 3600,
'type': 'NS',
'values': [
'ns1.unit.tests.',
'ns2.unit.tests.',
]
}))
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected.records.remove(record)
break
empty = {'result': [], 'result_info': {'count': 0, 'per_page': 0}}
def test_populate(self):
provider = CloudflareProvider('test', 'email', 'token')
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=403,
text='{"success":false,"errors":[{"code":9103,'
'"message":"Unknown X-Auth-Key or X-Auth-Email"}],'
'"messages":[],"result":null}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unknown X-Auth-Key or X-Auth-Email',
ctx.exception.message)
# Bad auth, unknown resp
with requests_mock() as mock:
mock.get(ANY, status_code=403, text='{}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Authentication error', ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
# Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=200, json=self.empty)
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# re-populating the same non-existent zone uses cache and makes no
# calls
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(set(), again.records)
# bust zone cache
provider._zones = None
# existing zone with data
with requests_mock() as mock:
base = 'https://api.cloudflare.com/client/v4/zones'
# zones
with open('tests/fixtures/cloudflare-zones-page-1.json') as fh:
mock.get('{}?page=1'.format(base), status_code=200,
text=fh.read())
with open('tests/fixtures/cloudflare-zones-page-2.json') as fh:
mock.get('{}?page=2'.format(base), status_code=200,
text=fh.read())
mock.get('{}?page=3'.format(base), status_code=200,
json={'result': [], 'result_info': {'count': 0,
'per_page': 0}})
# records
base = '{}/234234243423aaabb334342aaa343435/dns_records' \
.format(base)
with open('tests/fixtures/cloudflare-dns_records-'
'page-1.json') as fh:
mock.get('{}?page=1'.format(base), status_code=200,
text=fh.read())
with open('tests/fixtures/cloudflare-dns_records-'
'page-2.json') as fh:
mock.get('{}?page=2'.format(base), status_code=200,
text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(9, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# re-populating the same zone/records comes out of cache, no calls
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(9, len(again.records))
def test_apply(self):
provider = CloudflareProvider('test', 'email', 'token')
provider._request = Mock()
provider._request.side_effect = [
self.empty, # no zones
{
'result': {
'id': 42,
}
}, # zone create
] + [None] * 15 # individual record creates
# non-existent zone, create everything
plan = provider.plan(self.expected)
self.assertEquals(9, len(plan.changes))
self.assertEquals(9, provider.apply(plan))
provider._request.assert_has_calls([
# created the domain
call('POST', '/zones', data={
'jump_start': False,
'name': 'unit.tests'
}),
# created at least one of the records with the expected data
call('POST', '/zones/42/dns_records', data={
'content': 'ns1.unit.tests.',
'type': 'NS',
'name': 'under.unit.tests',
'ttl': 3600
}),
])
# expected number of total calls
self.assertEquals(17, provider._request.call_count)
provider._request.reset_mock()
provider.zone_records = Mock(return_value=[
{
"id": "fc12ab34cd5611334422ab3322997653",
"type": "A",
"name": "www.unit.tests",
"content": "1.2.3.4",
"proxiable": True,
"proxied": False,
"ttl": 300,
"locked": False,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:43.420689Z",
"created_on": "2017-03-11T18:01:43.420689Z",
"meta": {
"auto_added": False
}
},
{
"id": "fc12ab34cd5611334422ab3322997654",
"type": "A",
"name": "www.unit.tests",
"content": "2.2.3.4",
"proxiable": True,
"proxied": False,
"ttl": 300,
"locked": False,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:44.030044Z",
"created_on": "2017-03-11T18:01:44.030044Z",
"meta": {
"auto_added": False
}
},
{
"id": "fc12ab34cd5611334422ab3322997655",
"type": "A",
"name": "nc.unit.tests",
"content": "3.2.3.4",
"proxiable": True,
"proxied": False,
"ttl": 120,
"locked": False,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:44.030044Z",
"created_on": "2017-03-11T18:01:44.030044Z",
"meta": {
"auto_added": False
}
},
{
"id": "fc12ab34cd5611334422ab3322997655",
"type": "A",
"name": "ttl.unit.tests",
"content": "4.2.3.4",
"proxiable": True,
"proxied": False,
"ttl": 600,
"locked": False,
"zone_id": "ff12ab34cd5611334422ab3322997650",
"zone_name": "unit.tests",
"modified_on": "2017-03-11T18:01:44.030044Z",
"created_on": "2017-03-11T18:01:44.030044Z",
"meta": {
"auto_added": False
}
},
])
# we don't care about the POST/create return values
provider._request.return_value = {}
provider._request.side_effect = None
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'nc', {
'ttl': 60, # TTL is below their min
'type': 'A',
'value': '3.2.3.4'
}))
wanted.add_record(Record.new(wanted, 'ttl', {
'ttl': 300, # TTL change
'type': 'A',
'value': '3.2.3.4'
}))
plan = provider.plan(wanted)
# only see the delete & ttl update, below min-ttl is filtered out
self.assertEquals(2, len(plan.changes))
self.assertEquals(2, provider.apply(plan))
# recreate for update, and deletes for the 2 parts of the other
provider._request.assert_has_calls([
call('POST', '/zones/42/dns_records', data={
'content': '3.2.3.4',
'type': 'A',
'name': 'ttl.unit.tests',
'ttl': 300}),
call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
'dns_records/fc12ab34cd5611334422ab3322997655'),
call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
'dns_records/fc12ab34cd5611334422ab3322997653'),
call('DELETE', '/zones/ff12ab34cd5611334422ab3322997650/'
'dns_records/fc12ab34cd5611334422ab3322997654')
])
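The mocks above pin down the shape of the Cloudflare interaction: zones and dns_records are fetched page by page, creates go to POST /zones/<id>/dns_records, removals to DELETE on the record id, and an update is applied as a create plus deletes of the old entries. A sketch of driving the provider for real, mirroring how this test builds its expected zone from YAML (credentials and the config directory are placeholders):

from os.path import dirname, join

from octodns.provider.cloudflare import CloudflareProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone

# Placeholder credentials; the provider takes an id, the account email and
# the API key, just as the tests construct it.
cloudflare = CloudflareProvider('cloudflare', 'dns-admin@example.com',
                                'api-key')

# Desired state comes from YAML config, the same way `expected` is built above
config = YamlProvider('config', join(dirname(__file__), 'config'))
desired = Zone('unit.tests.', [])
config.populate(desired)

plan = cloudflare.plan(desired)
if plan:
    cloudflare.apply(plan)   # POSTs the creates, DELETEs the removals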

+ 202
- 0
tests/test_octodns_provider_dnsimple.py

@@ -0,0 +1,202 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.dnsimple import DnsimpleClientNotFound, DnsimpleProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
class TestDnsimpleProvider(TestCase):
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
# Our test suite differs a bit, add our NS and remove the simple one
expected.add_record(Record.new(expected, 'under', {
'ttl': 3600,
'type': 'NS',
'values': [
'ns1.unit.tests.',
'ns2.unit.tests.',
]
}))
for record in list(expected.records):
if record.name == 'sub' and record._type == 'NS':
expected.records.remove(record)
break
def test_populate(self):
provider = DnsimpleProvider('test', 'token', 42)
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401,
text='{"message": "Authentication failed"}')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals('Unauthorized', ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
# Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404,
text='{"message": "Domain `foo.bar` not found"}')
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# No diffs == no changes
with requests_mock() as mock:
base = 'https://api.dnsimple.com/v2/42/zones/unit.tests/' \
'records?page='
with open('tests/fixtures/dnsimple-page-1.json') as fh:
mock.get('{}{}'.format(base, 1), text=fh.read())
with open('tests/fixtures/dnsimple-page-2.json') as fh:
mock.get('{}{}'.format(base, 2), text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(14, len(zone.records))
changes = self.expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# 2nd populate makes no network calls/all from cache
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(14, len(again.records))
# bust the cache
del provider._zone_records[zone.name]
# test handling of invalid content
with requests_mock() as mock:
with open('tests/fixtures/dnsimple-invalid-content.json') as fh:
mock.get(ANY, text=fh.read())
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set([
Record.new(zone, '', {
'ttl': 3600,
'type': 'SSHFP',
'values': []
}),
Record.new(zone, '_srv._tcp', {
'ttl': 600,
'type': 'SRV',
'values': []
}),
Record.new(zone, 'naptr', {
'ttl': 600,
'type': 'NAPTR',
'values': []
}),
]), zone.records)
def test_apply(self):
provider = DnsimpleProvider('test', 'token', 42)
resp = Mock()
resp.json = Mock()
provider._client._request = Mock(return_value=resp)
# non-existent domain, create everything
resp.json.side_effect = [
DnsimpleClientNotFound, # no zone in populate
DnsimpleClientNotFound, # no domain during apply
]
plan = provider.plan(self.expected)
# No root NS
n = len(self.expected.records) - 1
self.assertEquals(n, len(plan.changes))
self.assertEquals(n, provider.apply(plan))
provider._client._request.assert_has_calls([
# created the domain
call('POST', '/domains', data={'name': 'unit.tests'}),
# created at least one of the records with the expected data
call('POST', '/zones/unit.tests/records', data={
'content': '20 30 foo-1.unit.tests.',
'priority': 10,
'type': 'SRV',
'name': '_srv._tcp',
'ttl': 600
}),
])
# expected number of total calls
self.assertEquals(25, provider._client._request.call_count)
provider._client._request.reset_mock()
# delete 1 and update 1
provider._client.records = Mock(return_value=[
{
'id': 11189897,
'name': 'www',
'content': '1.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189898,
'name': 'www',
'content': '2.2.3.4',
'ttl': 300,
'type': 'A',
},
{
'id': 11189899,
'name': 'ttl',
'content': '3.2.3.4',
'ttl': 600,
'type': 'A',
}
])
# Domain exists, we don't care about return
resp.json.side_effect = ['{}']
wanted = Zone('unit.tests.', [])
wanted.add_record(Record.new(wanted, 'ttl', {
'ttl': 300,
'type': 'A',
'value': '3.2.3.4'
}))
plan = provider.plan(wanted)
self.assertEquals(2, len(plan.changes))
self.assertEquals(2, provider.apply(plan))
# recreate for update, and deletes for the 2 parts of the other
provider._client._request.assert_has_calls([
call('POST', '/zones/unit.tests/records', data={
'content': '3.2.3.4',
'type': 'A',
'name': 'ttl',
'ttl': 300
}),
call('DELETE', '/zones/unit.tests/records/11189899'),
call('DELETE', '/zones/unit.tests/records/11189897'),
call('DELETE', '/zones/unit.tests/records/11189898')
])

+ 1155
- 0
tests/test_octodns_provider_dyn.py
File diff suppressed because it is too large


+ 290
- 0
tests/test_octodns_provider_powerdns.py

@@ -0,0 +1,290 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from json import loads, dumps
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from unittest import TestCase
from octodns.record import Record
from octodns.provider.powerdns import PowerDnsProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
EMPTY_TEXT = '''
{
"account": "",
"dnssec": false,
"id": "xunit.tests.",
"kind": "Master",
"last_check": 0,
"masters": [],
"name": "xunit.tests.",
"notified_serial": 0,
"rrsets": [],
"serial": 2017012801,
"soa_edit": "",
"soa_edit_api": "INCEPTION-INCREMENT",
"url": "api/v1/servers/localhost/zones/xunit.tests."
}
'''
with open('./tests/fixtures/powerdns-full-data.json') as fh:
FULL_TEXT = fh.read()
class TestPowerDnsProvider(TestCase):
def test_provider(self):
provider = PowerDnsProvider('test', 'non.existant', 'api-key',
nameserver_values=['8.8.8.8.',
'9.9.9.9.'])
# Bad auth
with requests_mock() as mock:
mock.get(ANY, status_code=401, text='Unauthorized')
with self.assertRaises(Exception) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
print(ctx.exception.message)
self.assertTrue('unauthorized' in ctx.exception.message)
# General error
with requests_mock() as mock:
mock.get(ANY, status_code=502, text='Things caught fire')
with self.assertRaises(HTTPError) as ctx:
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(502, ctx.exception.response.status_code)
# Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=422,
json={'error': "Could not find domain 'unit.tests.'"})
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# The rest of this is messy/complicated b/c it's dealing with mocking
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
self.assertEquals(14, len(expected.records))
# No diffs == no changes
with requests_mock() as mock:
mock.get(ANY, status_code=200, text=FULL_TEXT)
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(14, len(zone.records))
changes = expected.changes(zone, provider)
self.assertEquals(0, len(changes))
# Used in a minute
def assert_rrsets_callback(request, context):
data = loads(request.body)
self.assertEquals(len(expected.records), len(data['rrsets']))
return ''
# No existing records -> creates for every record in expected
with requests_mock() as mock:
mock.get(ANY, status_code=200, text=EMPTY_TEXT)
# post 201 is the response to the create with data
mock.patch(ANY, status_code=201, text=assert_rrsets_callback)
plan = provider.plan(expected)
self.assertEquals(len(expected.records), len(plan.changes))
self.assertEquals(len(expected.records), provider.apply(plan))
# Non-existent zone -> creates for every record in expected
# This is ugly; it would probably be better to ditch requests_mock and
# just mock things directly, since it doesn't seem to provide a way to
# get at the request params or to verify that things were called.
not_found = {'error': "Could not find domain 'unit.tests.'"}
with requests_mock() as mock:
# get 422's, unknown zone
mock.get(ANY, status_code=422, text='')
# patch 422's, unknown zone
mock.patch(ANY, status_code=422, text=dumps(not_found))
# post 201 is the response to the create with data
mock.post(ANY, status_code=201, text=assert_rrsets_callback)
plan = provider.plan(expected)
self.assertEquals(len(expected.records), len(plan.changes))
self.assertEquals(len(expected.records), provider.apply(plan))
with requests_mock() as mock:
# get 422's, unknown zone
mock.get(ANY, status_code=422, text='')
# patch 422's,
data = {'error': "Key 'name' not present or not a String"}
mock.patch(ANY, status_code=422, text=dumps(data))
with self.assertRaises(HTTPError) as ctx:
plan = provider.plan(expected)
provider.apply(plan)
response = ctx.exception.response
self.assertEquals(422, response.status_code)
self.assertTrue('error' in response.json())
with requests_mock() as mock:
# get 422's, unknown zone
mock.get(ANY, status_code=422, text='')
# patch 500's, things just blew up
mock.patch(ANY, status_code=500, text='')
with self.assertRaises(HTTPError):
plan = provider.plan(expected)
provider.apply(plan)
with requests_mock() as mock:
# get 422's, unknown zone
mock.get(ANY, status_code=422, text='')
# patch 422's, unknown zone
mock.patch(ANY, status_code=422, text=dumps(not_found))
# post 422's, something wrong with create
mock.post(ANY, status_code=422, text='Hello Word!')
with self.assertRaises(HTTPError):
plan = provider.plan(expected)
provider.apply(plan)
def test_small_change(self):
provider = PowerDnsProvider('test', 'non.existant', 'api-key')
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
self.assertEquals(14, len(expected.records))
# A small change to a single record
with requests_mock() as mock:
mock.get(ANY, status_code=200, text=FULL_TEXT)
missing = Zone(expected.name, [])
# Find and delete the SPF record
for record in expected.records:
if record._type != 'SPF':
missing.add_record(record)
def assert_delete_callback(request, context):
self.assertEquals({
'rrsets': [{
'records': [
{'content': '"v=spf1 ip4:192.168.0.1/16-all"',
'disabled': False}
],
'changetype': 'DELETE',
'type': 'SPF',
'name': 'spf.unit.tests.',
'ttl': 600
}]
}, loads(request.body))
return ''
mock.patch(ANY, status_code=201, text=assert_delete_callback)
plan = provider.plan(missing)
self.assertEquals(1, len(plan.changes))
self.assertEquals(1, provider.apply(plan))
def test_existing_nameservers(self):
ns_values = ['8.8.8.8.', '9.9.9.9.']
provider = PowerDnsProvider('test', 'non.existant', 'api-key',
nameserver_values=ns_values)
expected = Zone('unit.tests.', [])
ns_record = Record.new(expected, '', {
'type': 'NS',
'ttl': 600,
'values': ns_values
})
expected.add_record(ns_record)
# no changes
with requests_mock() as mock:
data = {
'rrsets': [{
'comments': [],
'name': 'unit.tests.',
'records': [
{
'content': '8.8.8.8.',
'disabled': False
},
{
'content': '9.9.9.9.',
'disabled': False
}
],
'ttl': 600,
'type': 'NS'
}, {
'comments': [],
'name': 'unit.tests.',
'records': [{
'content': '1.2.3.4',
'disabled': False,
}],
'ttl': 60,
'type': 'A'
}]
}
mock.get(ANY, status_code=200, json=data)
unrelated_record = Record.new(expected, '', {
'type': 'A',
'ttl': 60,
'value': '1.2.3.4'
})
expected.add_record(unrelated_record)
plan = provider.plan(expected)
self.assertFalse(plan)
# remove it now that we don't need the unrelated change any longer
expected.records.remove(unrelated_record)
# ttl diff
with requests_mock() as mock:
data = {
'rrsets': [{
'comments': [],
'name': 'unit.tests.',
'records': [
{
'content': '8.8.8.8.',
'disabled': False
},
{
'content': '9.9.9.9.',
'disabled': False
},
],
'ttl': 3600,
'type': 'NS'
}]
}
mock.get(ANY, status_code=200, json=data)
plan = provider.plan(expected)
self.assertEquals(1, len(plan.changes))
# create
with requests_mock() as mock:
data = {
'rrsets': []
}
mock.get(ANY, status_code=200, json=data)
plan = provider.plan(expected)
self.assertEquals(1, len(plan.changes))

+ 1145
- 0
tests/test_octodns_provider_route53.py
File diff suppressed because it is too large


+ 111
- 0
tests/test_octodns_provider_yaml.py

@@ -0,0 +1,111 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from os.path import dirname, isfile, join
from unittest import TestCase
from yaml import safe_load
from yaml.constructor import ConstructorError
from octodns.record import Create
from octodns.provider.yaml import YamlProvider
from octodns.zone import SubzoneRecordException, Zone
from helpers import TemporaryDirectory
class TestYamlProvider(TestCase):
def test_provider(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('unit.tests.', [])
# With target we don't add anything
source.populate(zone, target=source)
self.assertEquals(0, len(zone.records))
# without it we see everything
source.populate(zone)
self.assertEquals(14, len(zone.records))
# Assumption here is that a clean round-trip means that everything
# worked as expected, data that went in came back out and could be
# pulled in yet again and still match up. That assumes that the input
# data completely exercises things. This assumption can be tested
# relatively well by running
# ./script/coverage tests/test_octodns_provider_yaml.py and
# looking at the coverage file
# ./htmlcov/octodns_provider_yaml_py.html
with TemporaryDirectory() as td:
# Add some subdirs to make sure that it can create them
directory = join(td.dirname, 'sub', 'dir')
yaml_file = join(directory, 'unit.tests.yaml')
target = YamlProvider('test', directory)
# We add everything
plan = target.plan(zone)
self.assertEquals(13, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
self.assertFalse(isfile(yaml_file))
# Now actually do it
self.assertEquals(13, target.apply(plan))
self.assertTrue(isfile(yaml_file))
# There should be no changes after the round trip
reloaded = Zone('unit.tests.', [])
target.populate(reloaded)
self.assertFalse(zone.changes(reloaded, target=source))
# A 2nd sync should still create everything
plan = target.plan(zone)
self.assertEquals(13, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
with open(yaml_file) as fh:
data = safe_load(fh.read())
# these are stored as plural 'values'
for r in data['']:
self.assertTrue('values' in r)
self.assertTrue('values' in data['mx'])
self.assertTrue('values' in data['naptr'])
self.assertTrue('values' in data['_srv._tcp'])
self.assertTrue('values' in data['txt'])
# these are stored as singular 'value'
self.assertTrue('value' in data['aaaa'])
self.assertTrue('value' in data['ptr'])
self.assertTrue('value' in data['spf'])
self.assertTrue('value' in data['www'])
def test_empty(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('empty.', [])
# without it we see everything
source.populate(zone)
self.assertEquals(0, len(zone.records))
def test_unsorted(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('unordered.', [])
with self.assertRaises(ConstructorError):
source.populate(zone)
def test_subzone_handling(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
# If we add `sub` as a sub-zone we'll reject `www.sub`
zone = Zone('unit.tests.', ['sub'])
with self.assertRaises(SubzoneRecordException) as ctx:
source.populate(zone)
self.assertEquals('Record www.sub.unit.tests. is under a managed '
'subzone', ctx.exception.message)
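The round-trip above is also the recipe for using YamlProvider on its own: populate a Zone from one directory, plan against a provider rooted at another, and apply to write unit.tests.yaml, creating intermediate directories as needed. A small sketch with illustrative paths:

from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone

# Illustrative paths: ./config holds a unit.tests.yaml like the test
# fixtures, ./out/sub/dir is created when the plan is applied.
source = YamlProvider('source', './config')
zone = Zone('unit.tests.', [])
source.populate(zone)

target = YamlProvider('target', './out/sub/dir')
plan = target.plan(zone)      # everything is a Create the first time through
if plan:
    target.apply(plan)        # writes ./out/sub/dir/unit.tests.yaml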

+ 765
- 0
tests/test_octodns_record.py

@@ -0,0 +1,765 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, CnameRecord, Create, Delete, \
GeoValue, MxRecord, NaptrRecord, NaptrValue, NsRecord, PtrRecord, Record, \
SshfpRecord, SpfRecord, SrvRecord, TxtRecord, Update
from octodns.zone import Zone
from helpers import GeoProvider, SimpleProvider
class TestRecord(TestCase):
zone = Zone('unit.tests.', [])
def test_lowering(self):
record = ARecord(self.zone, 'MiXeDcAsE', {
'ttl': 30,
'type': 'A',
'value': '1.2.3.4',
})
self.assertEquals('mixedcase', record.name)
def test_a_and_record(self):
a_values = ['1.2.3.4', '2.2.3.4']
a_data = {'ttl': 30, 'values': a_values}
a = ARecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_value = '3.2.3.4'
b_data = {'ttl': 30, 'value': b_value}
b = ARecord(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
# missing ttl
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, None, {'value': '1.1.1.1'})
self.assertTrue('missing ttl' in ctx.exception.message)
# missing values & value
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, None, {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
# top-level
data = {'ttl': 30, 'value': '4.2.3.4'}
self.assertEquals(self.zone.name, ARecord(self.zone, '', data).fqdn)
self.assertEquals(self.zone.name, ARecord(self.zone, None, data).fqdn)
# ARecord equate with itself
self.assertTrue(a == a)
# Records with differing names and same type don't equate
self.assertFalse(a == b)
# Records with same name & type equate even if ttl is different
self.assertTrue(a == ARecord(self.zone, 'a',
{'ttl': 31, 'values': a_values}))
# Records with same name & type equate even if values are different
self.assertTrue(a == ARecord(self.zone, 'a',
{'ttl': 30, 'value': b_value}))
target = SimpleProvider()
# no changes if self
self.assertFalse(a.changes(a, target))
# no changes if clone
other = ARecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
self.assertFalse(a.changes(other, target))
# changes if ttl modified
other.ttl = 31
update = a.changes(other, target)
self.assertEquals(a, update.existing)
self.assertEquals(other, update.new)
# changes if values modified
other.ttl = a.ttl
other.values = ['4.4.4.4']
update = a.changes(other, target)
self.assertEquals(a, update.existing)
self.assertEquals(other, update.new)
# Hashing
records = set()
records.add(a)
self.assertTrue(a in records)
self.assertFalse(b in records)
records.add(b)
self.assertTrue(b in records)
# __repr__ doesn't blow up
a.__repr__()
# Record.__repr__ does
with self.assertRaises(NotImplementedError):
class DummyRecord(Record):
def __init__(self):
pass
DummyRecord().__repr__()
def test_invalid_a(self):
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, 'a', {
'ttl': 30,
'value': 'foo',
})
self.assertTrue('Invalid record' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, 'a', {
'ttl': 30,
'values': ['1.2.3.4', 'bar'],
})
self.assertTrue('Invalid record' in ctx.exception.message)
def test_geo(self):
geo_data = {'ttl': 42, 'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'AF': ['1.1.1.1'],
'AS-JP': ['2.2.2.2', '3.3.3.3'],
'NA-US': ['4.4.4.4', '5.5.5.5'],
'NA-US-CA': ['6.6.6.6', '7.7.7.7']}}
geo = ARecord(self.zone, 'geo', geo_data)
self.assertEquals(geo_data, geo.data)
other_data = {'ttl': 42, 'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'AF': ['1.1.1.1'],
'AS-JP': ['2.2.2.2', '3.3.3.3'],
'NA-US': ['4.4.4.4', '5.5.5.5'],
'NA-US-CA': ['6.6.6.6', '7.7.7.7']}}
other = ARecord(self.zone, 'geo', other_data)
self.assertEquals(other_data, other.data)
simple_target = SimpleProvider()
geo_target = GeoProvider()
# Geo provider doesn't consider identical geo to be changes
self.assertFalse(geo.changes(geo, geo_target))
# geo values don't impact equality
other.geo['AF'].values = ['9.9.9.9']
self.assertTrue(geo == other)
# Non-geo supporting provider doesn't consider geo diffs to be changes
self.assertFalse(geo.changes(other, simple_target))
# Geo provider does consider geo diffs to be changes
self.assertTrue(geo.changes(other, geo_target))
# Object without geo doesn't impact equality
other.geo = {}
self.assertTrue(geo == other)
# Non-geo supporting provider doesn't consider lack of geo a diff
self.assertFalse(geo.changes(other, simple_target))
# Geo provider does consider lack of geo diffs to be changes
self.assertTrue(geo.changes(other, geo_target))
# invalid geo code
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, 'geo', {'ttl': 42,
'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'abc': ['1.1.1.1']}})
self.assertEquals('Invalid geo "abc"', ctx.exception.message)
with self.assertRaises(Exception) as ctx:
ARecord(self.zone, 'geo', {'ttl': 42,
'values': ['5.2.3.4', '6.2.3.4'],
'geo': {'NA-US': ['1.1.1']}})
self.assertTrue('not a valid ip' in ctx.exception.message)
# __repr__ doesn't blow up
geo.__repr__()
def assertMultipleValues(self, _type, a_values, b_value):
a_data = {'ttl': 30, 'values': a_values}
a = _type(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_data = {'ttl': 30, 'value': b_value}
b = _type(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
# missing values & value
with self.assertRaises(Exception) as ctx:
_type(self.zone, None, {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
def test_aaaa(self):
a_values = ['2001:0db8:3c4d:0015:0000:0000:1a2f:1a2b',
'2001:0db8:3c4d:0015:0000:0000:1a2f:1a3b']
b_value = '2001:0db8:3c4d:0015:0000:0000:1a2f:1a4b'
self.assertMultipleValues(AaaaRecord, a_values, b_value)
with self.assertRaises(Exception) as ctx:
AaaaRecord(self.zone, 'a', {
'ttl': 30,
'value': 'foo',
})
self.assertTrue('Invalid record' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
AaaaRecord(self.zone, 'a', {
'ttl': 30,
'values': [b_value, 'bar'],
})
self.assertTrue('Invalid record' in ctx.exception.message)
def assertSingleValue(self, _type, a_value, b_value):
a_data = {'ttl': 30, 'value': a_value}
a = _type(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_value, a.value)
self.assertEquals(a_data, a.data)
b_data = {'ttl': 30, 'value': b_value}
b = _type(self.zone, 'b', b_data)
self.assertEquals(b_value, b.value)
self.assertEquals(b_data, b.data)
# missing value
with self.assertRaises(Exception) as ctx:
_type(self.zone, None, {'ttl': 42})
self.assertTrue('missing value' in ctx.exception.message)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in value causes change
other = _type(self.zone, 'a', {'ttl': 30, 'value': b_value})
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_cname(self):
self.assertSingleValue(CnameRecord, 'target.foo.com.',
'other.foo.com.')
with self.assertRaises(Exception) as ctx:
CnameRecord(self.zone, 'a', {
'ttl': 30,
'value': 'foo',
})
self.assertTrue('Invalid record' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
CnameRecord(self.zone, 'a', {
'ttl': 30,
'values': ['foo.com.', 'bar.com'],
})
self.assertTrue('Invalid record' in ctx.exception.message)
def test_mx(self):
a_values = [{
'priority': 10,
'value': 'smtp1'
}, {
'priority': 20,
'value': 'smtp2'
}]
a_data = {'ttl': 30, 'values': a_values}
a = MxRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['priority'], a.values[0].priority)
self.assertEquals(a_values[0]['value'], a.values[0].value)
self.assertEquals(a_values[1]['priority'], a.values[1].priority)
self.assertEquals(a_values[1]['value'], a.values[1].value)
self.assertEquals(a_data, a.data)
b_value = {
'priority': 12,
'value': 'smtp3',
}
b_data = {'ttl': 30, 'value': b_value}
b = MxRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['priority'], b.values[0].priority)
self.assertEquals(b_value['value'], b.values[0].value)
self.assertEquals(b_data, b.data)
# missing value
with self.assertRaises(Exception) as ctx:
MxRecord(self.zone, None, {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
# invalid value
with self.assertRaises(Exception) as ctx:
MxRecord(self.zone, None, {'ttl': 42, 'value': {}})
self.assertTrue('Invalid value' in ctx.exception.message)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in priority causes change
other = MxRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].priority = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in value causes change
other.values[0].priority = a.values[0].priority
other.values[0].value = 'smtpX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_naptr(self):
a_values = [{
'order': 10,
'preference': 11,
'flags': 'X',
'service': 'Y',
'regexp': 'Z',
'replacement': '.',
}, {
'order': 20,
'preference': 21,
'flags': 'A',
'service': 'B',
'regexp': 'C',
'replacement': 'foo.com',
}]
a_data = {'ttl': 30, 'values': a_values}
a = NaptrRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
for i in (0, 1):
for k in a_values[0].keys():
self.assertEquals(a_values[i][k], getattr(a.values[i], k))
self.assertEquals(a_data, a.data)
b_value = {
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
}
b_data = {'ttl': 30, 'value': b_value}
b = NaptrRecord(self.zone, 'b', b_data)
for k in a_values[0].keys():
self.assertEquals(b_value[k], getattr(b.values[0], k))
self.assertEquals(b_data, b.data)
# missing value
with self.assertRaises(Exception) as ctx:
NaptrRecord(self.zone, None, {'ttl': 42})
self.assertTrue('missing value' in ctx.exception.message)
# invalid value
with self.assertRaises(Exception) as ctx:
NaptrRecord(self.zone, None, {'ttl': 42, 'value': {}})
self.assertTrue('Invalid value' in ctx.exception.message)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in priority causes change
other = NaptrRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].order = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in replacement causes change
other.values[0].order = a.values[0].order
other.values[0].replacement = 'smtpX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# full sorting
# equivalent
b_naptr_value = b.values[0]
self.assertEquals(0, b_naptr_value.__cmp__(b_naptr_value))
# by order
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 10,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 40,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
# by preference
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 10,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 40,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
# by flags
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'A',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'Z',
'service': 'N',
'regexp': 'O',
'replacement': 'x',
})))
# by service
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'A',
'regexp': 'O',
'replacement': 'x',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'Z',
'regexp': 'O',
'replacement': 'x',
})))
# by regexp
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'A',
'replacement': 'x',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'Z',
'replacement': 'x',
})))
# by replacement
self.assertEquals(1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'a',
})))
self.assertEquals(-1, b_naptr_value.__cmp__(NaptrValue({
'order': 30,
'preference': 31,
'flags': 'M',
'service': 'N',
'regexp': 'O',
'replacement': 'z',
})))
# __repr__ doesn't blow up
a.__repr__()
def test_ns(self):
a_values = ['5.6.7.8.', '6.7.8.9.', '7.8.9.0.']
a_data = {'ttl': 30, 'values': a_values}
a = NsRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values, a.values)
self.assertEquals(a_data, a.data)
b_value = '9.8.7.6.'
b_data = {'ttl': 30, 'value': b_value}
b = NsRecord(self.zone, 'b', b_data)
self.assertEquals([b_value], b.values)
self.assertEquals(b_data, b.data)
# missing values & value
with self.assertRaises(Exception) as ctx:
NsRecord(self.zone, None, {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
NsRecord(self.zone, 'a', {
'ttl': 30,
'value': 'foo',
})
self.assertTrue('Invalid record' in ctx.exception.message)
with self.assertRaises(Exception) as ctx:
NsRecord(self.zone, 'a', {
'ttl': 30,
'values': ['foo.com.', 'bar.com'],
})
self.assertTrue('Invalid record' in ctx.exception.message)
def test_ptr(self):
self.assertSingleValue(PtrRecord, 'foo.bar.com.', 'other.bar.com.')
with self.assertRaises(Exception) as ctx:
PtrRecord(self.zone, 'a', {
'ttl': 30,
'value': 'foo',
})
self.assertTrue('Invalid record' in ctx.exception.message)
def test_sshfp(self):
a_values = [{
'algorithm': 10,
'fingerprint_type': 11,
'fingerprint': 'abc123',
}, {
'algorithm': 20,
'fingerprint_type': 21,
'fingerprint': 'def456',
}]
a_data = {'ttl': 30, 'values': a_values}
a = SshfpRecord(self.zone, 'a', a_data)
self.assertEquals('a', a.name)
self.assertEquals('a.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['algorithm'], a.values[0].algorithm)
self.assertEquals(a_values[0]['fingerprint_type'],
a.values[0].fingerprint_type)
self.assertEquals(a_values[0]['fingerprint'], a.values[0].fingerprint)
self.assertEquals(a_data, a.data)
b_value = {
'algorithm': 30,
'fingerprint_type': 31,
'fingerprint': 'ghi789',
}
b_data = {'ttl': 30, 'value': b_value}
b = SshfpRecord(self.zone, 'b', b_data)
self.assertEquals(b_value['algorithm'], b.values[0].algorithm)
self.assertEquals(b_value['fingerprint_type'],
b.values[0].fingerprint_type)
self.assertEquals(b_value['fingerprint'], b.values[0].fingerprint)
self.assertEquals(b_data, b.data)
# missing value
with self.assertRaises(Exception) as ctx:
SshfpRecord(self.zone, None, {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
# invalid value
with self.assertRaises(Exception) as ctx:
SshfpRecord(self.zone, None, {'ttl': 42, 'value': {}})
self.assertTrue('Invalid value' in ctx.exception.message)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in algorithm causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].algorithm = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in fingerprint_type causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].algorithm = a.values[0].algorithm
other.values[0].fingerprint_type = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in fingerprint causes change
other = SshfpRecord(self.zone, 'a', {'ttl': 30, 'values': a_values})
other.values[0].fingerprint_type = a.values[0].fingerprint_type
other.values[0].fingerprint = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_spf(self):
a_values = ['spf1 -all', 'spf1 -hrm']
b_value = 'spf1 -other'
self.assertMultipleValues(SpfRecord, a_values, b_value)
def test_srv(self):
a_values = [{
'priority': 10,
'weight': 11,
'port': 12,
'target': 'server1',
}, {
'priority': 20,
'weight': 21,
'port': 22,
'target': 'server2',
}]
a_data = {'ttl': 30, 'values': a_values}
a = SrvRecord(self.zone, '_a._tcp', a_data)
self.assertEquals('_a._tcp', a.name)
self.assertEquals('_a._tcp.unit.tests.', a.fqdn)
self.assertEquals(30, a.ttl)
self.assertEquals(a_values[0]['priority'], a.values[0].priority)
self.assertEquals(a_values[0]['weight'], a.values[0].weight)
self.assertEquals(a_values[0]['port'], a.values[0].port)
self.assertEquals(a_values[0]['target'], a.values[0].target)
self.assertEquals(a_data, a.data)
b_value = {
'priority': 30,
'weight': 31,
'port': 32,
'target': 'server3',
}
b_data = {'ttl': 30, 'value': b_value}
b = SrvRecord(self.zone, '_b._tcp', b_data)
self.assertEquals(b_value['priority'], b.values[0].priority)
self.assertEquals(b_value['weight'], b.values[0].weight)
self.assertEquals(b_value['port'], b.values[0].port)
self.assertEquals(b_value['target'], b.values[0].target)
self.assertEquals(b_data, b.data)
# invalid name
with self.assertRaises(Exception) as ctx:
SrvRecord(self.zone, 'bad', {'ttl': 42})
self.assertEquals('Invalid name bad.unit.tests.',
ctx.exception.message)
# missing value
with self.assertRaises(Exception) as ctx:
SrvRecord(self.zone, '_missing._tcp', {'ttl': 42})
self.assertTrue('missing value(s)' in ctx.exception.message)
# invalid value
with self.assertRaises(Exception) as ctx:
SrvRecord(self.zone, '_missing._udp', {'ttl': 42, 'value': {}})
self.assertTrue('Invalid value' in ctx.exception.message)
target = SimpleProvider()
# No changes with self
self.assertFalse(a.changes(a, target))
# Diff in priority causes change
other = SrvRecord(self.zone, '_a._icmp',
{'ttl': 30, 'values': a_values})
other.values[0].priority = 22
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in weight causes change
other.values[0].priority = a.values[0].priority
other.values[0].weight = 33
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in port causes change
other.values[0].weight = a.values[0].weight
other.values[0].port = 44
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# Diff in target causes change
other.values[0].port = a.values[0].port
other.values[0].target = 'serverX'
change = a.changes(other, target)
self.assertEqual(change.existing, a)
self.assertEqual(change.new, other)
# __repr__ doesn't blow up
a.__repr__()
def test_txt(self):
a_values = ['a one', 'a two']
b_value = 'b other'
self.assertMultipleValues(TxtRecord, a_values, b_value)
Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'escaped\; foo',
})
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'un-escaped; foo',
})
self.assertEquals('Invalid record txt.unit.tests., unescaped ;',
ctx.exception.message)
def test_record_new(self):
txt = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some text',
})
self.assertIsInstance(txt, TxtRecord)
self.assertEquals('TXT', txt._type)
self.assertEquals(['some text'], txt.values)
# Missing type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {})
self.assertTrue('missing type' in ctx.exception.message)
# Unknown type
with self.assertRaises(Exception) as ctx:
Record.new(self.zone, 'unknown', {
'type': 'XXX',
})
self.assertTrue('Unknown record type' in ctx.exception.message)
def test_change(self):
existing = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some text',
})
new = Record.new(self.zone, 'txt', {
'ttl': 44,
'type': 'TXT',
'value': 'some change',
})
create = Create(new)
self.assertEquals(new.values, create.record.values)
update = Update(existing, new)
self.assertEquals(new.values, update.record.values)
delete = Delete(existing)
self.assertEquals(existing.values, delete.record.values)
def test_geo_value(self):
code = 'NA-US-CA'
values = ['1.2.3.4']
geo = GeoValue(code, values)
self.assertEquals(code, geo.code)
self.assertEquals('NA', geo.continent_code)
self.assertEquals('US', geo.country_code)
self.assertEquals('CA', geo.subdivision_code)
self.assertEquals(values, geo.values)
self.assertEquals(['NA-US', 'NA'], list(geo.parents))
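test_record_new and test_a_and_record together describe the model the providers above rely on: Record.new dispatches on the 'type' key to the matching concrete class, and records compare equal by name and type alone, leaving changes() against a target to do the value/TTL comparison. A short sketch:

from octodns.record import Record
from octodns.zone import Zone

zone = Zone('unit.tests.', [])

# 'type' picks the concrete class, ARecord in this case
www = Record.new(zone, 'www', {
    'ttl': 300,
    'type': 'A',
    'values': ['1.2.3.4', '2.2.3.4'],
})

# Same name & type compare equal even though the TTL differs; it's
# changes() against a target that reports the drift as an Update.
other = Record.new(zone, 'www', {
    'ttl': 600,
    'type': 'A',
    'values': ['1.2.3.4', '2.2.3.4'],
})
assert www == other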

+ 176
- 0
tests/test_octodns_source_tinydns.py

@@ -0,0 +1,176 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.record import Record
from octodns.source.tinydns import TinyDnsFileSource
from octodns.zone import Zone
from helpers import SimpleProvider
class TestTinyDnsFileSource(TestCase):
source = TinyDnsFileSource('test', './tests/zones')
def test_populate_normal(self):
got = Zone('example.com.', [])
self.source.populate(got)
self.assertEquals(11, len(got.records))
expected = Zone('example.com.', [])
for name, data in (
('', {
'type': 'A',
'ttl': 30,
'values': ['10.2.3.4', '10.2.3.5'],
}),
('sub', {
'type': 'NS',
'ttl': 30,
'values': ['ns1.ns.com.', 'ns2.ns.com.'],
}),
('www', {
'type': 'A',
'ttl': 3600,
'value': '10.2.3.6',
}),
('cname', {
'type': 'CNAME',
'ttl': 3600,
'value': 'www.example.com.',
}),
('some-host-abc123', {
'type': 'A',
'ttl': 1800,
'value': '10.2.3.7',
}),
('has-dup-def123', {
'type': 'A',
'ttl': 3600,
'value': '10.2.3.8',
}),
('www.sub', {
'type': 'A',
'ttl': 3600,
'value': '1.2.3.4',
}),
('has-dup-def456', {
'type': 'A',
'ttl': 3600,
'value': '10.2.3.8',
}),
('', {
'type': 'MX',
'ttl': 3600,
'values': [{
'priority': 10,
'value': 'smtp-1-host.example.com.',
}, {
'priority': 20,
'value': 'smtp-2-host.example.com.',
}]
}),
('smtp', {
'type': 'MX',
'ttl': 1800,
'values': [{
'priority': 30,
'value': 'smtp-1-host.example.com.',
}, {
'priority': 40,
'value': 'smtp-2-host.example.com.',
}]
}),
):
record = Record.new(expected, name, data)
expected.add_record(record)
changes = expected.changes(got, SimpleProvider())
self.assertEquals([], changes)
def test_populate_normal_sub1(self):
got = Zone('asdf.subtest.com.', [])
self.source.populate(got)
self.assertEquals(1, len(got.records))
expected = Zone('asdf.subtest.com.', [])
for name, data in (
('a3', {
'type': 'A',
'ttl': 3600,
'values': ['10.2.3.7'],
}),
):
record = Record.new(expected, name, data)
expected.add_record(record)
changes = expected.changes(got, SimpleProvider())
self.assertEquals([], changes)
def test_populate_normal_sub2(self):
got = Zone('blah-asdf.subtest.com.', [])
self.source.populate(got)
self.assertEquals(2, len(got.records))
expected = Zone('sub-asdf.subtest.com.', [])
for name, data in (
('a1', {
'type': 'A',
'ttl': 3600,
'values': ['10.2.3.5'],
}),
('a2', {
'type': 'A',
'ttl': 3600,
'values': ['10.2.3.6'],
}),
):
record = Record.new(expected, name, data)
expected.add_record(record)
changes = expected.changes(got, SimpleProvider())
self.assertEquals([], changes)
def test_populate_in_addr_arpa(self):
got = Zone('3.2.10.in-addr.arpa.', [])
self.source.populate(got)
expected = Zone('3.2.10.in-addr.arpa.', [])
for name, data in (
('10', {
'type': 'PTR',
'ttl': 3600,
'value': 'a-ptr.example.com.'
}),
('11', {
'type': 'PTR',
'ttl': 30,
'value': 'a-ptr-2.example.com.'
}),
('8', {
'type': 'PTR',
'ttl': 3600,
'value': 'has-dup-def123.example.com.'
}),
('7', {
'type': 'PTR',
'ttl': 1800,
'value': 'some-host-abc123.example.com.'
}),
):
record = Record.new(expected, name, data)
expected.add_record(record)
changes = expected.changes(got, SimpleProvider())
self.assertEquals([], changes)
def test_ignores_subs(self):
got = Zone('example.com.', ['sub'])
self.source.populate(got)
self.assertEquals(10, len(got.records))
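Only populate() is exercised here, which is all a conversion needs: read the tinydns data directory these tests use and write the zone back out as octodns YAML through the provider tested earlier. The output path and ids below are illustrative.

from octodns.provider.yaml import YamlProvider
from octodns.source.tinydns import TinyDnsFileSource
from octodns.zone import Zone

source = TinyDnsFileSource('tinydns', './tests/zones')
zone = Zone('example.com.', [])
source.populate(zone)         # the same 11 records checked above

target = YamlProvider('yaml', './converted')
plan = target.plan(zone)
if plan:
    target.apply(plan)        # writes ./converted/example.com.yaml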

+ 61
- 0
tests/test_octodns_yaml.py

@@ -0,0 +1,61 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from StringIO import StringIO
from unittest import TestCase
from yaml.constructor import ConstructorError
from octodns.yaml import safe_dump, safe_load
class TestYaml(TestCase):
def test_stuff(self):
self.assertEquals({
1: 'a',
2: 'b',
'3': 'c',
10: 'd',
'11': 'e',
}, safe_load('''
1: a
2: b
'3': c
10: d
'11': e
'''))
self.assertEquals({
'*.1.2': 'a',
'*.2.2': 'b',
'*.10.1': 'c',
'*.11.2': 'd',
}, safe_load('''
'*.1.2': 'a'
'*.2.2': 'b'
'*.10.1': 'c'
'*.11.2': 'd'
'''))
with self.assertRaises(ConstructorError) as ctx:
safe_load('''
'*.2.2': 'b'
'*.1.2': 'a'
'*.11.2': 'd'
'*.10.1': 'c'
''')
self.assertEquals('keys out of order: *.2.2, *.1.2, *.11.2, *.10.1',
ctx.exception.problem)
buf = StringIO()
safe_dump({
'*.1.1': 42,
'*.11.1': 43,
'*.2.1': 44,
}, buf)
self.assertEquals("---\n'*.1.1': 42\n'*.2.1': 44\n'*.11.1': 43\n",
buf.getvalue())

+ 174
- 0
tests/test_octodns_zone.py

@@ -0,0 +1,174 @@
#
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, Create, Delete, Record, Update
from octodns.zone import DuplicateRecordException, SubzoneRecordException, Zone
from helpers import SimpleProvider
class TestZone(TestCase):
def test_lowering(self):
zone = Zone('UniT.TEsTs.', [])
self.assertEquals('unit.tests.', zone.name)
def test_hostname_from_fqdn(self):
zone = Zone('unit.tests.', [])
for hostname, fqdn in (
('', 'unit.tests.'),
('', 'unit.tests'),
('foo', 'foo.unit.tests.'),
('foo', 'foo.unit.tests'),
('foo.bar', 'foo.bar.unit.tests.'),
('foo.bar', 'foo.bar.unit.tests'),
('foo.unit.tests', 'foo.unit.tests.unit.tests.'),
('foo.unit.tests', 'foo.unit.tests.unit.tests'),
):
self.assertEquals(hostname, zone.hostname_from_fqdn(fqdn))
def test_add_record(self):
zone = Zone('unit.tests.', [])
a = ARecord(zone, 'a', {'ttl': 42, 'value': '1.1.1.1'})
b = ARecord(zone, 'b', {'ttl': 42, 'value': '1.1.1.1'})
zone.add_record(a)
self.assertEquals(zone.records, set([a]))
# Can't add record with same name & type
with self.assertRaises(DuplicateRecordException) as ctx:
zone.add_record(a)
self.assertEquals('Duplicate record a.unit.tests., type A',
ctx.exception.message)
self.assertEquals(zone.records, set([a]))
# Can add dup name, with different type
zone.add_record(b)
self.assertEquals(zone.records, set([a, b]))
def test_changes(self):
before = Zone('unit.tests.', [])
a = ARecord(before, 'a', {'ttl': 42, 'value': '1.1.1.1'})
before.add_record(a)
b = AaaaRecord(before, 'b', {'ttl': 42, 'value': '1:1:1::1'})
before.add_record(b)
after = Zone('unit.tests.', [])
after.add_record(a)
after.add_record(b)
target = SimpleProvider()
# before == after -> no changes
self.assertFalse(before.changes(after, target))
# add a record, delete a record -> [Delete, Create]
c = ARecord(before, 'c', {'ttl': 42, 'value': '1.1.1.1'})
after.add_record(c)
after.records.remove(b)
self.assertEquals(after.records, set([a, c]))
changes = before.changes(after, target)
self.assertEquals(2, len(changes))
for change in changes:
if isinstance(change, Create):
create = change
elif isinstance(change, Delete):
delete = change
self.assertEquals(b, delete.existing)
self.assertFalse(delete.new)
self.assertEquals(c, create.new)
self.assertFalse(create.existing)
delete.__repr__()
create.__repr__()
after = Zone('unit.tests.', [])
changed = ARecord(before, 'a', {'ttl': 42, 'value': '2.2.2.2'})
after.add_record(changed)
after.add_record(b)
changes = before.changes(after, target)
self.assertEquals(1, len(changes))
update = changes[0]
self.assertIsInstance(update, Update)
# Using changes here to get a full equality
self.assertFalse(a.changes(update.existing, target))
self.assertFalse(changed.changes(update.new, target))
update.__repr__()
def test_unsupporting(self):
class NoAaaaProvider(object):
id = 'no-aaaa'
SUPPORTS_GEO = False
def supports(self, record):
return record._type != 'AAAA'
current = Zone('unit.tests.', [])
desired = Zone('unit.tests.', [])
a = ARecord(desired, 'a', {'ttl': 42, 'value': '1.1.1.1'})
desired.add_record(a)
aaaa = AaaaRecord(desired, 'b', {'ttl': 42, 'value': '1:1:1::1'})
desired.add_record(aaaa)
# Only create the supported A, not the AAAA
changes = current.changes(desired, NoAaaaProvider())
self.assertEquals(1, len(changes))
self.assertIsInstance(changes[0], Create)
# Only delete the supported A, not the AAAA
changes = desired.changes(current, NoAaaaProvider())
self.assertEquals(1, len(changes))
self.assertIsInstance(changes[0], Delete)
def test_missing_dot(self):
with self.assertRaises(Exception) as ctx:
Zone('not.allowed', [])
self.assertTrue('missing ending dot' in ctx.exception.message)
def test_sub_zones(self):
zone = Zone('unit.tests.', set(['sub', 'barred']))
# NS for exactly the sub is allowed
record = Record.new(zone, 'sub', {
'ttl': 3600,
'type': 'NS',
'values': ['1.2.3.4.', '2.3.4.5.'],
})
zone.add_record(record)
self.assertEquals(set([record]), zone.records)
# non-NS for exactly the sub is rejected
record = Record.new(zone, 'sub', {
'ttl': 3600,
'type': 'A',
'values': ['1.2.3.4', '2.3.4.5'],
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
self.assertTrue('not of type NS' in ctx.exception.message)
# NS for something below the sub is rejected
record = Record.new(zone, 'foo.sub', {
'ttl': 3600,
'type': 'NS',
'values': ['1.2.3.4.', '2.3.4.5.'],
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
self.assertTrue('under a managed subzone' in ctx.exception.message)
# A for something below the sub is rejected
record = Record.new(zone, 'foo.bar.sub', {
'ttl': 3600,
'type': 'A',
'values': ['1.2.3.4', '2.3.4.5'],
})
with self.assertRaises(SubzoneRecordException) as ctx:
zone.add_record(record)
self.assertTrue('under a managed subzone' in ctx.exception.message)
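test_sub_zones is the behaviour the YamlProvider subzone test relies on earlier: when a zone is built with sub-zone names, only an NS record exactly at the sub-zone name is accepted, and anything else at or under it raises SubzoneRecordException. A short sketch:

from octodns.record import Record
from octodns.zone import SubzoneRecordException, Zone

# 'sub' is delegated, so only its NS record may live in this zone
zone = Zone('unit.tests.', set(['sub']))
zone.add_record(Record.new(zone, 'sub', {
    'ttl': 3600,
    'type': 'NS',
    'values': ['ns1.example.com.', 'ns2.example.com.'],
}))

try:
    zone.add_record(Record.new(zone, 'www.sub', {
        'ttl': 300,
        'type': 'A',
        'value': '1.2.3.4',
    }))
except SubzoneRecordException:
    pass    # records under a managed subzone are rejected, as tested above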

BIN
tests/zones/.is-needed-for-tests


+ 48
- 0
tests/zones/example.com

@@ -0,0 +1,48 @@
#
# This is a comment header
#
# Multi-value A
+example.com:10.2.3.4:30
+example.com:10.2.3.5:30
Ccname.other.foo:www.other.foo
# A+PTR
=some-host-abc123.example.com:10.2.3.7:1800
# A+PTR with duplicate address, fine for A's, but warning on PTRs
=has-dup-def123.example.com:10.2.3.8
=has-dup-def456.example.com:10.2.3.8
# A+PTR with an ip addr we ignore
=ignored.example.com:0.0.0.0
# PTR
^10.3.2.10.in-addr.arpa:a-ptr.example.com
^11.3.2.10.in-addr.arpa:a-ptr-2.example.com:30
# PTR in another zone we won't be interested in
^142.1.168.192.in-addr.arpa:a-ptr.example.com
# MX
@example.com::smtp-1-host.example.com:10
@example.com::smtp-2-host.example.com:20
# MX with ttl
@smtp.example.com::smtp-1-host.example.com:30:1800
@smtp.example.com::smtp-2-host.example.com:40:1800
# NS
.sub.example.com::ns1.ns.com:30
.sub.example.com::ns2.ns.com:30
# A, under sub
+www.sub.example.com::1.2.3.4
# Top-level NS
.example.com::ns1.ns.com
.example.com::ns2.ns.com
# sub special cases
+a1.blah-asdf.subtest.com:10.2.3.5
+a2.blah-asdf.subtest.com:10.2.3.6
+a3.asdf.subtest.com:10.2.3.7

+ 7
- 0
tests/zones/other.foo

@@ -0,0 +1,7 @@
# Single value A without ttl
+www.example.com:10.2.3.6
# CNAME with trailing comment
Ccname.example.com:www.example.com # this is a comment
+www.other.foo:14.2.3.6
