Browse Source

Merge branch 'master' into patch-1

pull/565/head
Ross McFarland 5 years ago
committed by GitHub
parent
commit
e6e745d1b7
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 2059 additions and 166 deletions
  1. +2
    -0
      README.md
  2. +2
    -0
      docs/dynamic_records.md
  3. +2
    -0
      docs/geo_records.md
  4. +15
    -4
      octodns/manager.py
  5. +58
    -39
      octodns/provider/ns1.py
  6. +28
    -6
      octodns/provider/route53.py
  7. +6
    -1
      octodns/provider/selectel.py
  8. +444
    -0
      octodns/provider/ultra.py
  9. +1
    -1
      octodns/record/__init__.py
  10. +100
    -0
      octodns/source/envvar.py
  11. +6
    -6
      requirements.txt
  12. +1
    -1
      setup.py
  13. +94
    -0
      tests/fixtures/ultra-records-page-1.json
  14. +34
    -0
      tests/fixtures/ultra-records-page-2.json
  15. +135
    -0
      tests/fixtures/ultra-zones-page-1.json
  16. +135
    -0
      tests/fixtures/ultra-zones-page-2.json
  17. +23
    -0
      tests/test_octodns_manager.py
  18. +303
    -87
      tests/test_octodns_provider_ns1.py
  19. +64
    -20
      tests/test_octodns_provider_route53.py
  20. +506
    -0
      tests/test_octodns_provider_ultra.py
  21. +2
    -1
      tests/test_octodns_record.py
  22. +14
    -0
      tests/test_octodns_source_axfr.py
  23. +41
    -0
      tests/test_octodns_source_envvar.py
  24. +43
    -0
      tests/zones/invalid.records.

+ 2
- 0
README.md View File

@ -187,6 +187,7 @@ The above command pulled the existing data out of Route53 and placed the results
| [DnsimpleProvider](/octodns/provider/dnsimple.py) | | All | No | CAA tags restricted |
| [DynProvider](/octodns/provider/dyn.py) | dyn | All | Both | |
| [EtcHostsProvider](/octodns/provider/etc_hosts.py) | | A, AAAA, ALIAS, CNAME | No | |
| [EnvVarSource](/octodns/source/envvar.py) | | TXT | No | read-only environment variable injection |
| [GoogleCloudProvider](/octodns/provider/googlecloud.py) | google-cloud-dns | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | No | |
| [MythicBeastsProvider](/octodns/provider/mythicbeasts.py) | Mythic Beasts | A, AAAA, ALIAS, CNAME, MX, NS, SRV, SSHFP, CAA, TXT | No | |
| [Ns1Provider](/octodns/provider/ns1.py) | ns1-python | All | Yes | No CNAME support, missing `NA` geo target |
@ -196,6 +197,7 @@ The above command pulled the existing data out of Route53 and placed the results
| [Route53](/octodns/provider/route53.py) | boto3 | A, AAAA, CAA, CNAME, MX, NAPTR, NS, PTR, SPF, SRV, TXT | Both | CNAME health checks don't support a Host header |
| [Selectel](/octodns/provider/selectel.py) | | A, AAAA, CNAME, MX, NS, SPF, SRV, TXT | No | |
| [Transip](/octodns/provider/transip.py) | transip | A, AAAA, CNAME, MX, SRV, SPF, TXT, SSHFP, CAA | No | |
| [UltraDns](/octodns/provider/ultra.py) | | A, AAAA, CAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | |
| [AxfrSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [ZoneFileSource](/octodns/source/axfr.py) | | A, AAAA, CNAME, MX, NS, PTR, SPF, SRV, TXT | No | read-only |
| [TinyDnsFileSource](/octodns/source/tinydns.py) | | A, CNAME, MX, NS, PTR | No | read-only |


+ 2
- 0
docs/dynamic_records.md View File

@ -110,6 +110,7 @@ test:
| Key | Description | Default |
|--|--|--|
| measure_latency | Show latency in AWS console | true |
| request_interval | Healthcheck interval [10\|30] seconds | 10 |
```yaml
@ -123,4 +124,5 @@ test:
route53:
healthcheck:
measure_latency: false
request_interval: 30
```

+ 2
- 0
docs/geo_records.md View File

@ -86,6 +86,7 @@ test:
| Key | Description | Default |
|--|--|--|
| measure_latency | Show latency in AWS console | true |
| request_interval | Healthcheck interval [10\|30] seconds | 10 |
```yaml
---
@ -98,4 +99,5 @@ test:
route53:
healthcheck:
measure_latency: false
request_interval: 30
```

+ 15
- 4
octodns/manager.py View File

@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function, \
from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from os import environ
from six import text_type
import logging
from .provider.base import BaseProvider
@ -221,13 +222,21 @@ class Manager(object):
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets):
def _populate_and_plan(self, zone_name, sources, targets, lenient=False):
self.log.debug('sync: populating, zone=%s', zone_name)
self.log.debug('sync: populating, zone=%s, lenient=%s',
zone_name, lenient)
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
for source in sources:
source.populate(zone)
try:
source.populate(zone, lenient=lenient)
except TypeError as e:
if "keyword argument 'lenient'" not in text_type(e):
raise
self.log.warn(': provider %s does not accept lenient param',
source.__class__.__name__)
source.populate(zone)
self.log.debug('sync: planning, zone=%s', zone_name)
plans = []
@ -259,6 +268,7 @@ class Manager(object):
futures = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
lenient = config.get('lenient', False)
try:
sources = config['sources']
except KeyError:
@ -308,7 +318,8 @@ class Manager(object):
.format(zone_name, target))
futures.append(self._executor.submit(self._populate_and_plan,
zone_name, sources, targets))
zone_name, sources,
targets, lenient=lenient))
# Wait on all results and unpack/flatten them in to a list of target &
# plan pairs.


+ 58
- 39
octodns/provider/ns1.py View File

@ -237,7 +237,6 @@ class Ns1Provider(BaseProvider):
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
CATCHALL_PREFIX = 'catchall__'
def _update_filter(self, filter, with_disabled):
if with_disabled:
@ -455,6 +454,16 @@ class Ns1Provider(BaseProvider):
data['geo'] = geo
return data
def _parse_dynamic_pool_name(self, pool_name):
if pool_name.startswith('catchall__'):
# Special case for the old-style catchall prefix
return pool_name[10:]
try:
pool_name, _ = pool_name.rsplit('__', 1)
except ValueError:
pass
return pool_name
def _data_for_dynamic_A(self, _type, record):
# First make sure we have the expected filters config
if not self._valid_filter_config(record['filters'], record['domain']):
@ -472,9 +481,8 @@ class Ns1Provider(BaseProvider):
for answer in record['answers']:
# region (group name in the UI) is the pool name
pool_name = answer['region']
# Get the actual pool name from the constructed pool name in case
# of the catchall
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
pool = pools[pool_name]
meta = answer['meta']
@ -501,15 +509,24 @@ class Ns1Provider(BaseProvider):
# tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
# that may eventually run into problems, but I don't have any use-cases
# examples currently where it would
rules = []
rules = {}
for pool_name, region in sorted(record['regions'].items()):
# Rules that refer to the catchall pool would have the
# CATCHALL_PREFIX in the pool name. Strip the prefix to get back
# the pool name as in the config
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
meta = region['meta']
notes = self._parse_notes(meta.get('note', ''))
rule_order = notes['rule-order']
try:
rule = rules[rule_order]
except KeyError:
rule = {
'pool': pool_name,
'_order': rule_order,
}
rules[rule_order] = rule
# The group notes field in the UI is a `note` on the region here,
# that's where we can find our pool's fallback.
if 'fallback' in notes:
@ -560,17 +577,15 @@ class Ns1Provider(BaseProvider):
for state in meta.get('us_state', []):
geos.add('NA-US-{}'.format(state))
rule = {
'pool': pool_name,
'_order': notes['rule-order'],
}
if geos:
rule['geos'] = sorted(geos)
rules.append(rule)
# There are geos, combine them with any existing geos for this
# pool and recorded the sorted unique set of them
rule['geos'] = sorted(set(rule.get('geos', [])) | geos)
# Order and convert to a list
default = sorted(default)
# Order
# Convert to list and order
rules = list(rules.values())
rules.sort(key=lambda r: (r['_order'], r['pool']))
return {
@ -1050,29 +1065,34 @@ class Ns1Provider(BaseProvider):
meta = {
'note': self._encode_notes(notes),
}
if georegion:
meta['georegion'] = sorted(georegion)
if country:
meta['country'] = sorted(country)
if us_state:
meta['us_state'] = sorted(us_state)
georegion_meta = dict(meta)
georegion_meta['georegion'] = sorted(georegion)
regions['{}__georegion'.format(pool_name)] = {
'meta': georegion_meta,
}
if country or us_state:
# If there's country and/or states its a country pool,
# countries and states can coexist as they're handled by the
# same step in the filterchain (countries and georegions
# cannot as they're seperate stages and run the risk of
# eliminating all options)
country_state_meta = dict(meta)
if country:
country_state_meta['country'] = sorted(country)
if us_state:
country_state_meta['us_state'] = sorted(us_state)
regions['{}__country'.format(pool_name)] = {
'meta': country_state_meta,
}
if not georegion and not country and not us_state:
# This is the catchall pool. Modify the pool name in the record
# being pushed
# NS1 regions are indexed by pool names. Any reuse of pool
# names in the rules will result in overwriting of the pool.
# Reuse of pools is in general disallowed but for the case of
# the catchall pool - to allow legitimate usecases.
# The pool name renaming is done to accommodate for such a
# reuse.
# (We expect only one catchall per record. Any associated
# validation is expected to covered under record validation)
pool_name = '{}{}'.format(self.CATCHALL_PREFIX, pool_name)
regions[pool_name] = {
'meta': meta,
}
# If there's no targeting it's a catchall
regions['{}__catchall'.format(pool_name)] = {
'meta': meta,
}
existing_monitors = self._monitors_for(record)
active_monitors = set()
@ -1102,15 +1122,14 @@ class Ns1Provider(BaseProvider):
# Build our list of answers
# The regions dictionary built above already has the required pool
# names. Iterate over them and add answers.
# In the case of the catchall, original pool name can be obtained
# by stripping the CATCHALL_PREFIX from the pool name
answers = []
for pool_name in sorted(regions.keys()):
priority = 1
# Dynamic/health checked
pool_label = pool_name
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Remove the pool type from the end of the name
pool_name = self._parse_dynamic_pool_name(pool_name)
self._add_answers_for_pool(answers, default_answers, pool_name,
pool_label, pool_answers, pools,
priority)


+ 28
- 6
octodns/provider/route53.py View File

@ -512,6 +512,10 @@ class _Route53GeoRecord(_Route53Record):
self.values)
class Route53ProviderException(Exception):
pass
def _mod_keyer(mod):
rrset = mod['ResourceRecordSet']
@ -1031,8 +1035,20 @@ class Route53Provider(BaseProvider):
.get('healthcheck', {}) \
.get('measure_latency', True)
def _healthcheck_request_interval(self, record):
interval = record._octodns.get('route53', {}) \
.get('healthcheck', {}) \
.get('request_interval', 10)
if (interval in [10, 30]):
return interval
else:
raise Route53ProviderException(
'route53.healthcheck.request_interval '
'parameter must be either 10 or 30.')
def _health_check_equivalent(self, host, path, protocol, port,
measure_latency, health_check, value=None):
measure_latency, request_interval,
health_check, value=None):
config = health_check['HealthCheckConfig']
# So interestingly Route53 normalizes IPAddress which will cause us to
@ -1050,9 +1066,10 @@ class Route53Provider(BaseProvider):
None)
resource_path = config.get('ResourcePath', None)
return host == fully_qualified_domain_name and \
path == resource_path and protocol == config['Type'] \
and port == config['Port'] and \
path == resource_path and protocol == config['Type'] and \
port == config['Port'] and \
measure_latency == config['MeasureLatency'] and \
request_interval == config['RequestInterval'] and \
value == config_ip_address
def get_health_check_id(self, record, value, create):
@ -1077,6 +1094,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
healthcheck_interval = self._healthcheck_request_interval(record)
# we're looking for a healthcheck with the current version & our record
# type, we'll ignore anything else
@ -1091,6 +1109,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
healthcheck_interval,
health_check,
value=value):
# this is the health check we're looking for
@ -1108,7 +1127,7 @@ class Route53Provider(BaseProvider):
'FailureThreshold': 6,
'MeasureLatency': healthcheck_latency,
'Port': healthcheck_port,
'RequestInterval': 10,
'RequestInterval': healthcheck_interval,
'Type': healthcheck_protocol,
}
if healthcheck_protocol != 'TCP':
@ -1143,9 +1162,10 @@ class Route53Provider(BaseProvider):
self._health_checks[id] = health_check
self.log.info('get_health_check_id: created id=%s, host=%s, '
'path=%s, protocol=%s, port=%d, measure_latency=%r, '
'value=%s', id, healthcheck_host, healthcheck_path,
'request_interval=%d, value=%s',
id, healthcheck_host, healthcheck_path,
healthcheck_protocol, healthcheck_port,
healthcheck_latency, value)
healthcheck_latency, healthcheck_interval, value)
return id
def _gc_health_checks(self, record, new):
@ -1238,6 +1258,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
healthcheck_interval = self._healthcheck_request_interval(record)
try:
health_check_id = rrset['HealthCheckId']
@ -1249,6 +1270,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
healthcheck_interval,
health_check):
# it has the right health check
return False


+ 6
- 1
octodns/provider/selectel.py View File

@ -15,6 +15,11 @@ from ..record import Record, Update
from .base import BaseProvider
def escape_semicolon(s):
assert s
return s.replace(';', '\\;')
class SelectelAuthenticationRequired(Exception):
def __init__(self, msg):
message = 'Authorization failed. Invalid or empty token.'
@ -200,7 +205,7 @@ class SelectelProvider(BaseProvider):
return {
'ttl': records[0]['ttl'],
'type': _type,
'values': [r['content'] for r in records],
'values': [escape_semicolon(r['content']) for r in records]
}
def _data_for_SRV(self, _type, records):


+ 444
- 0
octodns/provider/ultra.py View File

@ -0,0 +1,444 @@
from collections import defaultdict
from ipaddress import ip_address
from logging import getLogger
from requests import Session
from ..record import Record
from .base import BaseProvider
class UltraClientException(Exception):
'''
Base Ultra exception type
'''
pass
class UltraNoZonesExistException(UltraClientException):
'''
Specially handling this condition where no zones exist in an account.
This is not an error exactly yet ultra treats this scenario as though a
failure has occurred.
'''
def __init__(self, data):
super(UltraNoZonesExistException, self).__init__('NoZonesExist')
class UltraClientUnauthorized(UltraClientException):
'''
Exception for invalid credentials.
'''
def __init__(self):
super(UltraClientUnauthorized, self).__init__('Unauthorized')
class UltraProvider(BaseProvider):
'''
Neustar UltraDNS provider
Documentation for Ultra REST API requires a login:
https://portal.ultradns.com/static/docs/REST-API_User_Guide.pdf
Implemented to the May 20, 2020 version of the document (dated on page ii)
Also described as Version 2.83.0 (title page)
Tested against 3.0.0-20200627220036.81047f5
As determined by querying https://api.ultradns.com/version
ultra:
class: octodns.provider.ultra.UltraProvider
# Ultra Account Name (required)
account: acct
# Ultra username (required)
username: user
# Ultra password (required)
password: pass
'''
RECORDS_TO_TYPE = {
'A (1)': 'A',
'AAAA (28)': 'AAAA',
'CAA (257)': 'CAA',
'CNAME (5)': 'CNAME',
'MX (15)': 'MX',
'NS (2)': 'NS',
'PTR (12)': 'PTR',
'SPF (99)': 'SPF',
'SRV (33)': 'SRV',
'TXT (16)': 'TXT',
}
TYPE_TO_RECORDS = {v: k for k, v in RECORDS_TO_TYPE.items()}
SUPPORTS = set(TYPE_TO_RECORDS.keys())
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
TIMEOUT = 5
def _request(self, method, path, params=None,
data=None, json=None, json_response=True):
self.log.debug('_request: method=%s, path=%s', method, path)
url = '{}{}'.format(self._base_uri, path)
resp = self._sess.request(method,
url,
params=params,
data=data,
json=json,
timeout=self._timeout)
self.log.debug('_request: status=%d', resp.status_code)
if resp.status_code == 401:
raise UltraClientUnauthorized()
if json_response:
payload = resp.json()
# Expected return value when no zones exist in an account
if resp.status_code == 404 and len(payload) == 1 and \
payload[0]['errorCode'] == 70002:
raise UltraNoZonesExistException(resp)
else:
payload = resp.text
resp.raise_for_status()
return payload
def _get(self, path, **kwargs):
return self._request('GET', path, **kwargs)
def _post(self, path, **kwargs):
return self._request('POST', path, **kwargs)
def _delete(self, path, **kwargs):
return self._request('DELETE', path, **kwargs)
def _put(self, path, **kwargs):
return self._request('PUT', path, **kwargs)
def _login(self, username, password):
'''
Get an authorization token by logging in using the provided credentials
'''
path = '/v2/authorization/token'
data = {
'grant_type': 'password',
'username': username,
'password': password
}
resp = self._post(path, data=data)
self._sess.headers.update({
'Authorization': 'Bearer {}'.format(resp['access_token']),
})
def __init__(self, id, account, username, password, timeout=TIMEOUT,
*args, **kwargs):
self.log = getLogger('UltraProvider[{}]'.format(id))
self.log.debug('__init__: id=%s, account=%s, username=%s, '
'password=***', id, account, username)
super(UltraProvider, self).__init__(id, *args, **kwargs)
self._base_uri = 'https://restapi.ultradns.com'
self._sess = Session()
self._account = account
self._timeout = timeout
self._login(username, password)
self._zones = None
self._zone_records = {}
@property
def zones(self):
if self._zones is None:
offset = 0
limit = 100
zones = []
paging = True
while paging:
data = {'limit': limit, 'q': 'zone_type:PRIMARY',
'offset': offset}
try:
resp = self._get('/v2/zones', params=data)
except UltraNoZonesExistException:
paging = False
continue
zones.extend(resp['zones'])
info = resp['resultInfo']
if info['offset'] + info['returnedCount'] < info['totalCount']:
offset += info['returnedCount']
else:
paging = False
self._zones = [z['properties']['name'] for z in zones]
return self._zones
def _data_for_multiple(self, _type, records):
return {
'ttl': records['ttl'],
'type': _type,
'values': records['rdata'],
}
_data_for_A = _data_for_multiple
_data_for_SPF = _data_for_multiple
_data_for_NS = _data_for_multiple
def _data_for_TXT(self, _type, records):
return {
'ttl': records['ttl'],
'type': _type,
'values': [r.replace(';', '\\;') for r in records['rdata']],
}
def _data_for_AAAA(self, _type, records):
for i, v in enumerate(records['rdata']):
records['rdata'][i] = str(ip_address(v))
return {
'ttl': records['ttl'],
'type': _type,
'values': records['rdata'],
}
def _data_for_single(self, _type, record):
return {
'type': _type,
'ttl': record['ttl'],
'value': record['rdata'][0],
}
_data_for_PTR = _data_for_single
_data_for_CNAME = _data_for_single
def _data_for_CAA(self, _type, records):
return {
'type': _type,
'ttl': records['ttl'],
'values': [{'flags': x.split()[0],
'tag': x.split()[1],
'value': x.split()[2].strip('"')}
for x in records['rdata']]
}
def _data_for_MX(self, _type, records):
return {
'type': _type,
'ttl': records['ttl'],
'values': [{'preference': x.split()[0],
'exchange': x.split()[1]}
for x in records['rdata']]
}
def _data_for_SRV(self, _type, records):
return {
'type': _type,
'ttl': records['ttl'],
'values': [{
'priority': x.split()[0],
'weight': x.split()[1],
'port': x.split()[2],
'target': x.split()[3],
} for x in records['rdata']]
}
def zone_records(self, zone):
if zone.name not in self._zone_records:
if zone.name not in self.zones:
return []
records = []
path = '/v2/zones/{}/rrsets'.format(zone.name)
offset = 0
limit = 100
paging = True
while paging:
resp = self._get(path,
params={'offset': offset, 'limit': limit})
records.extend(resp['rrSets'])
info = resp['resultInfo']
if info['offset'] + info['returnedCount'] < info['totalCount']:
offset += info['returnedCount']
else:
paging = False
self._zone_records[zone.name] = records
return self._zone_records[zone.name]
def _record_for(self, zone, name, _type, records, lenient):
data_for = getattr(self, '_data_for_{}'.format(_type))
data = data_for(_type, records)
record = Record.new(zone, name, data, source=self, lenient=lenient)
return record
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
exists = False
before = len(zone.records)
records = self.zone_records(zone)
if records:
exists = True
values = defaultdict(lambda: defaultdict(None))
for record in records:
name = zone.hostname_from_fqdn(record['ownerName'])
if record['rrtype'] == 'SOA (6)':
continue
_type = self.RECORDS_TO_TYPE[record['rrtype']]
values[name][_type] = record
for name, types in values.items():
for _type, records in types.items():
record = self._record_for(zone, name, _type, records,
lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=%s',
len(zone.records) - before, exists)
return exists
def _apply(self, plan):
desired = plan.desired
changes = plan.changes
self.log.debug('_apply: zone=%s, len(changes)=%d', desired.name,
len(changes))
name = desired.name
if name not in self.zones:
self.log.debug('_apply: no matching zone, creating')
data = {'properties': {'name': name,
'accountName': self._account,
'type': 'PRIMARY'},
'primaryCreateInfo': {
'createType': 'NEW'}}
self._post('/v2/zones', json=data)
self.zones.append(name)
self._zone_records[name] = {}
for change in changes:
class_name = change.__class__.__name__
getattr(self, '_apply_{}'.format(class_name))(change)
# Clear the cache
self._zone_records.pop(name, None)
def _contents_for_multiple_resource_distribution(self, record):
if len(record.values) > 1:
return {
'ttl': record.ttl,
'rdata': record.values,
'profile': {
'@context':
'http://schemas.ultradns.com/RDPool.jsonschema',
'order': 'FIXED',
'description': record.fqdn
}
}
return {
'ttl': record.ttl,
'rdata': record.values
}
_contents_for_A = _contents_for_multiple_resource_distribution
_contents_for_AAAA = _contents_for_multiple_resource_distribution
def _contents_for_multiple(self, record):
return {
'ttl': record.ttl,
'rdata': record.values
}
_contents_for_NS = _contents_for_multiple
_contents_for_SPF = _contents_for_multiple
def _contents_for_TXT(self, record):
return {
'ttl': record.ttl,
'rdata': [v.replace('\\;', ';') for v in record.values]
}
def _contents_for_CNAME(self, record):
return {
'ttl': record.ttl,
'rdata': [record.value]
}
_contents_for_PTR = _contents_for_CNAME
def _contents_for_SRV(self, record):
return {
'ttl': record.ttl,
'rdata': ['{} {} {} {}'.format(x.priority,
x.weight,
x.port,
x.target) for x in record.values]
}
def _contents_for_CAA(self, record):
return {
'ttl': record.ttl,
'rdata': ['{} {} {}'.format(x.flags,
x.tag,
x.value) for x in record.values]
}
def _contents_for_MX(self, record):
return {
'ttl': record.ttl,
'rdata': ['{} {}'.format(x.preference,
x.exchange) for x in record.values]
}
def _gen_data(self, record):
zone_name = self._remove_prefix(record.fqdn, record.name + '.')
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
record._type,
record.fqdn)
contents_for = getattr(self, '_contents_for_{}'.format(record._type))
return path, contents_for(record)
def _apply_Create(self, change):
new = change.new
self.log.debug("_apply_Create: name=%s type=%s ttl=%s",
new.name,
new._type,
new.ttl)
path, content = self._gen_data(new)
self._post(path, json=content)
def _apply_Update(self, change):
new = change.new
self.log.debug("_apply_Update: name=%s type=%s ttl=%s",
new.name,
new._type,
new.ttl)
path, content = self._gen_data(new)
self.log.debug(path)
self.log.debug(content)
self._put(path, json=content)
def _remove_prefix(self, text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
def _apply_Delete(self, change):
existing = change.existing
for record in self.zone_records(existing.zone):
if record['rrtype'] == 'SOA (6)':
continue
if existing.fqdn == record['ownerName'] and \
existing._type == self.RECORDS_TO_TYPE[record['rrtype']]:
zone_name = self._remove_prefix(existing.fqdn,
existing.name + '.')
path = '/v2/zones/{}/rrsets/{}/{}'.format(zone_name,
existing._type,
existing.fqdn)
self._delete(path, json_response=False)

+ 1
- 1
octodns/record/__init__.py View File

@ -1215,7 +1215,7 @@ class SrvRecord(_ValuesMixin, Record):
def validate(cls, name, fqdn, data):
reasons = []
if not cls._name_re.match(name):
reasons.append('invalid name')
reasons.append('invalid name for SRV record')
reasons.extend(super(SrvRecord, cls).validate(name, fqdn, data))
return reasons


+ 100
- 0
octodns/source/envvar.py View File

@ -0,0 +1,100 @@
import logging
import os
from ..record import Record
from .base import BaseSource
class EnvVarSourceException(Exception):
pass
class EnvironmentVariableNotFoundException(EnvVarSourceException):
def __init__(self, data):
super(EnvironmentVariableNotFoundException, self).__init__(
'Unknown environment variable {}'.format(data))
class EnvVarSource(BaseSource):
'''
This source allows for environment variables to be embedded at octodns
execution time into zones. Intended to capture artifacts of deployment to
facilitate operational objectives.
The TXT record generated will only have a single value.
The record name cannot conflict with any other co-existing sources. If
this occurs, an exception will be thrown.
Possible use cases include:
- Embedding a version number into a TXT record to monitor update
propagation across authoritative providers.
- Capturing identifying information about the deployment process to
record where and when the zone was updated.
version:
class: octodns.source.envvar.EnvVarSource
# The environment variable in question, in this example the username
# currently executing octodns
variable: USER
# The TXT record name to embed the value found at the above
# environment variable
name: deployuser
# The TTL of the TXT record (optional, default 60)
ttl: 3600
This source is then combined with other sources in the octodns config
file:
zones:
netflix.com.:
sources:
- yaml
- version
targets:
- ultra
- ns1
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False
SUPPORTS = set(('TXT'))
DEFAULT_TTL = 60
def __init__(self, id, variable, name, ttl=DEFAULT_TTL):
self.log = logging.getLogger('{}[{}]'.format(
self.__class__.__name__, id))
self.log.debug('__init__: id=%s, variable=%s, name=%s, '
'ttl=%d', id, variable, name, ttl)
super(EnvVarSource, self).__init__(id)
self.envvar = variable
self.name = name
self.ttl = ttl
def _read_variable(self):
value = os.environ.get(self.envvar)
if value is None:
raise EnvironmentVariableNotFoundException(self.envvar)
self.log.debug('_read_variable: successfully loaded var=%s val=%s',
self.envvar, value)
return value
def populate(self, zone, target=False, lenient=False):
self.log.debug('populate: name=%s, target=%s, lenient=%s', zone.name,
target, lenient)
before = len(zone.records)
value = self._read_variable()
# We don't need to worry about conflicting records here because the
# manager will deconflict sources on our behalf.
payload = {'ttl': self.ttl, 'type': 'TXT', 'values': [value]}
record = Record.new(zone, self.name, payload, source=self,
lenient=lenient)
zone.add_record(record, lenient=lenient)
self.log.info('populate: found %s records, exists=False',
len(zone.records) - before)

+ 6
- 6
requirements.txt View File

@ -1,25 +1,25 @@
PyYaml==5.3.1
azure-common==1.1.25
azure-mgmt-dns==3.0.0
boto3==1.13.19
botocore==1.16.19
boto3==1.14.14
botocore==1.17.14
dnspython==1.16.0
docutils==0.16
dyn==1.8.1
edgegrid-python==1.1.1
futures==3.2.0; python_version < '3.0'
futures==3.2.0; python_version < '3.2'
google-cloud-core==1.3.0
google-cloud-dns==0.32.0
ipaddress==1.0.23
ipaddress==1.0.23; python_version < '3.3'
jmespath==0.10.0
msrestazure==0.6.3
msrestazure==0.6.4
natsort==6.2.1
ns1-python==0.16.0
ovh==0.5.0
pycountry-convert==0.7.2
pycountry==19.8.18
python-dateutil==2.8.1
requests==2.23.0
requests==2.24.0
s3transfer==0.3.3
setuptools==44.1.1
six==1.15.0


+ 1
- 1
setup.py View File

@ -69,7 +69,7 @@ setup(
'PyYaml>=4.2b1',
'dnspython>=1.15.0',
'futures>=3.2.0; python_version<"3.2"',
'ipaddress>=1.0.22',
'ipaddress>=1.0.22; python_version<"3.3"',
'natsort>=5.5.0',
'pycountry>=19.8.18',
'pycountry-convert>=0.7.2',


+ 94
- 0
tests/fixtures/ultra-records-page-1.json View File

@ -0,0 +1,94 @@
{
"zoneName": "octodns1.test.",
"rrSets": [
{
"ownerName": "_srv._tcp.octodns1.test.",
"rrtype": "SRV (33)",
"ttl": 3600,
"rdata": [
"0 20 443 cname.octodns1.test."
]
},
{
"ownerName": "a.octodns1.test.",
"rrtype": "A (1)",
"ttl": 3600,
"rdata": [
"1.1.1.1"
]
},
{
"ownerName": "aaaa.octodns1.test.",
"rrtype": "AAAA (28)",
"ttl": 3600,
"rdata": [
"0:0:0:0:0:0:0:1"
]
},
{
"ownerName": "caa.octodns1.test.",
"rrtype": "CAA (257)",
"ttl": 3600,
"rdata": [
"0 issue \"symantec.com\""
]
},
{
"ownerName": "cname.octodns1.test.",
"rrtype": "CNAME (5)",
"ttl": 60,
"rdata": [
"a.octodns1.test."
]
},
{
"ownerName": "mail.octodns1.test.",
"rrtype": "MX (15)",
"ttl": 3600,
"rdata": [
"1 aspmx.l.google.com.",
"5 alt1.aspmx.l.google.com."
]
},
{
"ownerName": "octodns1.test.",
"rrtype": "NS (2)",
"ttl": 86400,
"rdata": [
"pdns1.ultradns.biz.",
"pdns1.ultradns.com.",
"pdns1.ultradns.net.",
"pdns1.ultradns.org."
]
},
{
"ownerName": "octodns1.test.",
"rrtype": "SOA (6)",
"ttl": 86400,
"rdata": [
"pdns1.ultradns.com. phelps.netflix.com. 2020062003 86400 86400 86400 86400"
]
},
{
"ownerName": "ptr.octodns1.test.",
"rrtype": "PTR (12)",
"ttl": 300,
"rdata": [
"foo.bar.com."
]
},
{
"ownerName": "spf.octodns1.test.",
"rrtype": "SPF (99)",
"ttl": 3600,
"rdata": [
"v=spf1 -all"
]
}
],
"resultInfo": {
"totalCount": 12,
"offset": 0,
"returnedCount": 10
}
}

+ 34
- 0
tests/fixtures/ultra-records-page-2.json View File

@ -0,0 +1,34 @@
{
"zoneName": "octodns1.test.",
"rrSets": [
{
"ownerName": "txt.octodns1.test.",
"rrtype": "TXT (16)",
"ttl": 3600,
"rdata": [
"foobar",
"v=spf1 -all"
]
},
{
"ownerName": "octodns1.test.",
"rrtype": "A (1)",
"ttl": 3600,
"rdata": [
"1.2.3.4",
"1.2.3.5",
"1.2.3.6"
],
"profile": {
"@context": "http://schemas.ultradns.com/RDPool.jsonschema",
"order": "FIXED",
"description": "octodns1.test."
}
}
],
"resultInfo": {
"totalCount": 12,
"offset": 10,
"returnedCount": 2
}
}

+ 135
- 0
tests/fixtures/ultra-zones-page-1.json View File

@ -0,0 +1,135 @@
{
"queryInfo": {
"q": "zone_type:PRIMARY",
"sort": "NAME",
"reverse": false,
"limit": 10
},
"resultInfo": {
"totalCount": 20,
"offset": 0,
"returnedCount": 10
},
"zones": [
{
"properties": {
"name": "octodns1.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns10.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns11.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns12.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns13.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns14.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns15.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns16.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns17.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns18.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:07Z"
}
}
]
}

+ 135
- 0
tests/fixtures/ultra-zones-page-2.json View File

@ -0,0 +1,135 @@
{
"queryInfo": {
"q": "zone_type:PRIMARY",
"sort": "NAME",
"reverse": false,
"limit": 10
},
"resultInfo": {
"totalCount": 20,
"offset": 10,
"returnedCount": 10
},
"zones": [
{
"properties": {
"name": "octodns19.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:07Z"
}
},
{
"properties": {
"name": "octodns2.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns20.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:07Z"
}
},
{
"properties": {
"name": "octodns3.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns4.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns5.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns6.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:05Z"
}
},
{
"properties": {
"name": "octodns7.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns8.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
},
{
"properties": {
"name": "octodns9.test.",
"accountName": "Netflix - Automation",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "phelpstest",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T01:06Z"
}
}
]
}

+ 23
- 0
tests/test_octodns_manager.py View File

@ -278,6 +278,29 @@ class TestManager(TestCase):
.validate_configs()
self.assertTrue('unknown source' in text_type(ctx.exception))
def test_populate_lenient_fallback(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
# Only allow a target that doesn't exist
manager = Manager(get_config_filename('simple.yaml'))
class NoLenient(SimpleProvider):
def populate(self, zone, source=False):
pass
# This should be ok, we'll fall back to not passing it
manager._populate_and_plan('unit.tests.', [NoLenient()], [])
class NoZone(SimpleProvider):
def populate(self, lenient=False):
pass
# This will blow up, we don't fallback for source
with self.assertRaises(TypeError):
manager._populate_and_plan('unit.tests.', [NoZone()], [])
class TestMainThreadExecutor(TestCase):


+ 303
- 87
tests/test_octodns_provider_ns1.py View File

@ -528,52 +528,55 @@ class TestNs1Provider(TestCase):
class TestNs1ProviderDynamic(TestCase):
zone = Zone('unit.tests.', [])
record = Record.new(zone, '', {
'dynamic': {
'pools': {
'lhr': {
'fallback': 'iad',
'values': [{
'value': '3.4.5.6',
}],
},
'iad': {
'values': [{
'value': '1.2.3.4',
}, {
'value': '2.3.4.5',
}],
def record(self):
# return a new object each time so we can mess with it without causing
# problems from test to test
return Record.new(self.zone, '', {
'dynamic': {
'pools': {
'lhr': {
'fallback': 'iad',
'values': [{
'value': '3.4.5.6',
}],
},
'iad': {
'values': [{
'value': '1.2.3.4',
}, {
'value': '2.3.4.5',
}],
},
},
'rules': [{
'geos': [
'AF',
'EU-GB',
'NA-US-FL'
],
'pool': 'lhr',
}, {
'geos': [
'AF-ZW',
],
'pool': 'iad',
}, {
'pool': 'iad',
}],
},
'rules': [{
'geos': [
'AF',
'EU-GB',
'NA-US-FL'
],
'pool': 'lhr',
}, {
'geos': [
'AF-ZW',
],
'pool': 'iad',
}, {
'pool': 'iad',
}],
},
'octodns': {
'healthcheck': {
'host': 'send.me',
'path': '/_ping',
'port': 80,
'protocol': 'HTTP',
}
},
'ttl': 32,
'type': 'A',
'value': '1.2.3.4',
'meta': {},
})
'octodns': {
'healthcheck': {
'host': 'send.me',
'path': '/_ping',
'port': 80,
'protocol': 'HTTP',
}
},
'ttl': 32,
'type': 'A',
'value': '1.2.3.4',
'meta': {},
})
def test_notes(self):
provider = Ns1Provider('test', 'api-key')
@ -636,7 +639,7 @@ class TestNs1ProviderDynamic(TestCase):
self.assertEquals({
'1.2.3.4': monitor_one,
'2.3.4.5': monitor_four,
}, provider._monitors_for(self.record))
}, provider._monitors_for(self.record()))
def test_uuid(self):
# Just a smoke test/for coverage
@ -707,18 +710,19 @@ class TestNs1ProviderDynamic(TestCase):
provider = Ns1Provider('test', 'api-key')
value = '3.4.5.6'
monitor = provider._monitor_gen(self.record, value)
record = self.record()
monitor = provider._monitor_gen(record, value)
self.assertEquals(value, monitor['config']['host'])
self.assertTrue('\\nHost: send.me\\r' in monitor['config']['send'])
self.assertFalse(monitor['config']['ssl'])
self.assertEquals('host:unit.tests type:A', monitor['notes'])
self.record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(self.record, value)
record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(record, value)
self.assertTrue(monitor['config']['ssl'])
self.record._octodns['healthcheck']['protocol'] = 'TCP'
monitor = provider._monitor_gen(self.record, value)
record._octodns['healthcheck']['protocol'] = 'TCP'
monitor = provider._monitor_gen(record, value)
# No http send done
self.assertFalse('send' in monitor['config'])
# No http response expected
@ -790,10 +794,11 @@ class TestNs1ProviderDynamic(TestCase):
monitor_gen_mock.side_effect = [{'key': 'value'}]
monitor_create_mock.side_effect = [('mon-id', 'feed-id')]
value = '1.2.3.4'
monitor_id, feed_id = provider._monitor_sync(self.record, value, None)
record = self.record()
monitor_id, feed_id = provider._monitor_sync(record, value, None)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
monitor_gen_mock.assert_has_calls([call(self.record, value)])
monitor_gen_mock.assert_has_calls([call(record, value)])
monitor_create_mock.assert_has_calls([call({'key': 'value'})])
monitors_update_mock.assert_not_called()
feed_create_mock.assert_not_called()
@ -809,7 +814,7 @@ class TestNs1ProviderDynamic(TestCase):
'name': 'monitor name',
}
monitor_gen_mock.side_effect = [monitor]
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
@ -830,7 +835,7 @@ class TestNs1ProviderDynamic(TestCase):
}
monitor_gen_mock.side_effect = [monitor]
feed_create_mock.side_effect = ['feed-id2']
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id2', monitor_id)
self.assertEquals('feed-id2', feed_id)
@ -853,7 +858,7 @@ class TestNs1ProviderDynamic(TestCase):
'other': 'thing',
}
monitor_gen_mock.side_effect = [gened]
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
@ -883,8 +888,9 @@ class TestNs1ProviderDynamic(TestCase):
monitors_delete_mock.reset_mock()
notifylists_delete_mock.reset_mock()
monitors_for_mock.side_effect = [{}]
provider._monitors_gc(self.record)
monitors_for_mock.assert_has_calls([call(self.record)])
record = self.record()
provider._monitors_gc(record)
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_not_called()
notifylists_delete_mock.assert_not_called()
@ -900,8 +906,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id',
}
}]
provider._monitors_gc(self.record)
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record)
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')])
monitors_delete_mock.assert_has_calls([call('mon-id')])
notifylists_delete_mock.assert_has_calls([call('nl-id')])
@ -917,8 +923,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id',
}
}]
provider._monitors_gc(self.record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_not_called()
notifylists_delete_mock.assert_not_called()
@ -939,8 +945,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id2',
},
}]
provider._monitors_gc(self.record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_has_calls([call('mon-id2')])
notifylists_delete_mock.assert_has_calls([call('nl-id2')])
@ -972,18 +978,202 @@ class TestNs1ProviderDynamic(TestCase):
('mid-3', 'fid-3'),
]
rule0 = self.record.data['dynamic']['rules'][0]
rule1 = self.record.data['dynamic']['rules'][1]
rule0_saved_geos = rule0['geos']
rule1_saved_geos = rule1['geos']
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
rule1['geos'] = ['NA']
ret, _ = provider._params_for_A(self.record)
ret, monitor_ids = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_REGION(provider,
True))
rule0['geos'] = rule0_saved_geos
rule1['geos'] = rule1_saved_geos
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__georegion': {
'meta': {
'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
'note': 'rule-order:1'
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
self.assertEquals({'mid-1', 'mid-2', 'mid-3'}, monitor_ids)
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_state_only(self, monitors_for_mock,
monitor_sync_mock):
provider = Ns1Provider('test', 'api-key')
# pre-fill caches to avoid extraneous calls (things we're testing
# elsewhere)
provider._client._datasource_id = 'foo'
provider._client._feeds_for_monitors = {
'mon-id': 'feed-id',
}
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
'3.4.5.6': 'mid-3',
}]
monitor_sync_mock.side_effect = [
('mid-1', 'fid-1'),
('mid-2', 'fid-2'),
('mid-3', 'fid-3'),
]
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
rule1['geos'] = ['NA-US-CA']
ret, _ = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
True)
self.assertEquals(ret['filters'], exp)
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__country': {
'meta': {
'note': 'rule-order:1',
'us_state': ['CA']
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_contient_and_countries(self,
monitors_for_mock,
monitor_sync_mock):
provider = Ns1Provider('test', 'api-key')
# pre-fill caches to avoid extraneous calls (things we're testing
# elsewhere)
provider._client._datasource_id = 'foo'
provider._client._feeds_for_monitors = {
'mon-id': 'feed-id',
}
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
'3.4.5.6': 'mid-3',
}]
monitor_sync_mock.side_effect = [
('mid-1', 'fid-1'),
('mid-2', 'fid-2'),
('mid-3', 'fid-3'),
]
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU', 'NA-US-CA']
rule1['geos'] = ['NA', 'NA-US']
ret, _ = provider._params_for_A(record)
self.assertEquals(17, len(ret['answers']))
# Deeply check the answers we have here
# group the answers based on where they came from
notes = defaultdict(list)
for answer in ret['answers']:
notes[answer['meta']['note']].append(answer)
# Remove the meta and region part since it'll vary based on the
# exact pool, that'll let us == them down below
del answer['meta']
del answer['region']
# Expected groups. iad has occurrences in here: a country and region
# that was split out based on targeting a continent and a state. It
# finally has a catchall. Those are examples of the two ways pools get
# expanded.
#
# lhr splits in two, with a region and country.
#
# as well as both lhr georegion (for continents) and country. The first is
# an example of a repeated target pool in a rule (only allowed when the
# 2nd is a catchall.)
self.assertEquals(['from:--default--', 'from:iad__catchall',
'from:iad__country', 'from:iad__georegion',
'from:lhr__country', 'from:lhr__georegion'],
sorted(notes.keys()))
# All the iad's should match (after meta and region were removed)
self.assertEquals(notes['from:iad__catchall'],
notes['from:iad__country'])
self.assertEquals(notes['from:iad__catchall'],
notes['from:iad__georegion'])
# The lhrs should match each other too
self.assertEquals(notes['from:lhr__georegion'],
notes['from:lhr__country'])
# We have both country and region filter chain entries
exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
True)
self.assertEquals(ret['filters'], exp)
# and our region details match the expected behaviors/targeting
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__country': {
'meta': {
'country': ['US'],
'note': 'rule-order:1'
}
},
'iad__georegion': {
'meta': {
'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
'note': 'rule-order:1'
}
},
'lhr__country': {
'meta': {
'note': 'fallback:iad rule-order:0',
'us_state': ['CA']
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
@ -1014,18 +1204,21 @@ class TestNs1ProviderDynamic(TestCase):
# Set geos to 'OC' in rules[0] (pool - 'lhr')
# Check returned dict has list of countries under 'OC'
rule0 = self.record.data['dynamic']['rules'][0]
saved_geos = rule0['geos']
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule0['geos'] = ['OC']
ret, _ = provider._params_for_A(self.record)
self.assertEquals(set(ret['regions']['lhr']['meta']['country']),
ret, _ = provider._params_for_A(record)
# Make sure the country list expanded into all the OC countries
got = set(ret['regions']['lhr__country']['meta']['country'])
self.assertEquals(got,
Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC'])
# When rules has 'OC', it is converted to list of countries in the
# params. Look if the returned filters is the filter chain with country
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_COUNTRY(provider,
True))
rule0['geos'] = saved_geos
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
@ -1051,19 +1244,20 @@ class TestNs1ProviderDynamic(TestCase):
]
# This indirectly calls into _params_for_dynamic_A and tests the
# handling to get there
ret, _ = provider._params_for_A(self.record)
record = self.record()
ret, _ = provider._params_for_A(record)
# Given that self.record has both country and region in the rules,
# Given that record has both country and region in the rules,
# the returned filter chain should be one with region and country
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(
provider, True))
monitors_for_mock.assert_has_calls([call(self.record)])
monitors_for_mock.assert_has_calls([call(record)])
monitors_sync_mock.assert_has_calls([
call(self.record, '1.2.3.4', None),
call(self.record, '2.3.4.5', None),
call(self.record, '3.4.5.6', 'mid-3'),
call(record, '1.2.3.4', None),
call(record, '2.3.4.5', None),
call(record, '3.4.5.6', 'mid-3'),
])
record = Record.new(self.zone, 'geo', {
@ -1111,13 +1305,13 @@ class TestNs1ProviderDynamic(TestCase):
# Test out a small, but realistic setup that covers all the options
# We have country and region in the test config
filters = provider._get_updated_filter_chain(True, True)
catchall_pool_name = '{}{}'.format(provider.CATCHALL_PREFIX, 'iad')
catchall_pool_name = 'iad__catchall'
ns1_record = {
'answers': [{
'answer': ['3.4.5.6'],
'meta': {
'priority': 1,
'note': 'from:lhr',
'note': 'from:lhr__country',
},
'region': 'lhr',
}, {
@ -1169,14 +1363,24 @@ class TestNs1ProviderDynamic(TestCase):
'domain': 'unit.tests',
'filters': filters,
'regions': {
'lhr': {
# lhr will use the new-split style names (and that will require
# combining in the code to produce the expected answer
'lhr__georegion': {
'meta': {
'note': 'rule-order:1 fallback:iad',
'country': ['CA'],
'georegion': ['AFRICA'],
},
},
'lhr__country': {
'meta': {
'note': 'rule-order:1 fallback:iad',
'country': ['CA'],
'us_state': ['OR'],
},
},
# iad will use the old style "plain" region naming. We won't
# see mixed names like this in practice, but this should
# exercise both paths
'iad': {
'meta': {
'note': 'rule-order:2',
@ -1240,16 +1444,28 @@ class TestNs1ProviderDynamic(TestCase):
data2 = provider._data_for_A('A', ns1_record)
self.assertEquals(data, data2)
# Same answer if we have an old-style catchall name
old_style_catchall_pool_name = 'catchall__iad'
ns1_record['answers'][-2]['region'] = old_style_catchall_pool_name
ns1_record['answers'][-1]['region'] = old_style_catchall_pool_name
ns1_record['regions'][old_style_catchall_pool_name] = \
ns1_record['regions'][catchall_pool_name]
del ns1_record['regions'][catchall_pool_name]
data3 = provider._data_for_dynamic_A('A', ns1_record)
self.assertEquals(data, data2)
# Oceania test cases
# 1. Full list of countries should return 'OC' in geos
oc_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']
ns1_record['regions']['lhr']['meta']['country'] = list(oc_countries)
ns1_record['regions']['lhr__country']['meta']['country'] = \
list(oc_countries)
data3 = provider._data_for_A('A', ns1_record)
self.assertTrue('OC' in data3['dynamic']['rules'][0]['geos'])
# 2. Partial list of countries should return just those
partial_oc_cntry_list = list(oc_countries)[:5]
ns1_record['regions']['lhr']['meta']['country'] = partial_oc_cntry_list
ns1_record['regions']['lhr__country']['meta']['country'] = \
partial_oc_cntry_list
data4 = provider._data_for_A('A', ns1_record)
for c in partial_oc_cntry_list:
self.assertTrue(


+ 64
- 20
tests/test_octodns_provider_route53.py View File

@ -12,9 +12,9 @@ from unittest import TestCase
from mock import patch
from octodns.record import Create, Delete, Record, Update
from octodns.provider.route53 import Route53Provider, _Route53GeoDefault, \
_Route53DynamicValue, _Route53GeoRecord, _Route53Record, _mod_keyer, \
_octal_replace
from octodns.provider.route53 import Route53Provider, _Route53DynamicValue, \
_Route53GeoDefault, _Route53GeoRecord, Route53ProviderException, \
_Route53Record, _mod_keyer, _octal_replace
from octodns.zone import Zone
from helpers import GeoProvider
@ -304,6 +304,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -317,6 +318,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 42,
}, {
@ -330,6 +332,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -343,6 +346,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -357,6 +361,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1023,6 +1028,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1036,6 +1042,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1059,6 +1066,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1108,6 +1116,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1121,6 +1130,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1242,9 +1252,9 @@ class TestRoute53Provider(TestCase):
self.assertEquals('42', id)
stubber.assert_no_pending_responses()
def test_health_check_measure_latency(self):
def test_health_check_provider_options(self):
provider, stubber = self._get_stubbed_provider()
record_true = Record.new(self.expected, 'a', {
record = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
@ -1253,23 +1263,28 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': True
'measure_latency': True,
'request_interval': 10,
}
}
}
})
measure_latency = provider._healthcheck_measure_latency(record_true)
self.assertTrue(measure_latency)
latency = provider._healthcheck_measure_latency(record)
interval = provider._healthcheck_request_interval(record)
self.assertTrue(latency)
self.assertEquals(10, interval)
record_default = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
})
measure_latency = provider._healthcheck_measure_latency(record_default)
self.assertTrue(measure_latency)
latency = provider._healthcheck_measure_latency(record_default)
interval = provider._healthcheck_request_interval(record_default)
self.assertTrue(latency)
self.assertEquals(10, interval)
record_false = Record.new(self.expected, 'a', {
record = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
@ -1278,15 +1293,35 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': False
'measure_latency': False,
'request_interval': 30,
}
}
}
})
measure_latency = provider._healthcheck_measure_latency(record_false)
self.assertFalse(measure_latency)
latency = provider._healthcheck_measure_latency(record)
interval = provider._healthcheck_request_interval(record)
self.assertFalse(latency)
self.assertEquals(30, interval)
def test_create_health_checks_measure_latency(self):
record_invalid = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
},
'route53': {
'healthcheck': {
'request_interval': 20,
}
}
}
})
with self.assertRaises(Route53ProviderException):
interval = provider._healthcheck_request_interval(record_invalid)
def test_create_health_checks_provider_options(self):
provider, stubber = self._get_stubbed_provider()
health_check_config = {
@ -1296,7 +1331,7 @@ class TestRoute53Provider(TestCase):
'IPAddress': '1.2.3.4',
'MeasureLatency': False,
'Port': 443,
'RequestInterval': 10,
'RequestInterval': 30,
'ResourcePath': '/_dns',
'Type': 'HTTPS'
}
@ -1335,7 +1370,8 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': False
'measure_latency': False,
'request_interval': 30
}
}
}
@ -1344,7 +1380,9 @@ class TestRoute53Provider(TestCase):
value = record.geo['AF'].values[0]
id = provider.get_health_check_id(record, value, True)
ml = provider.health_checks[id]['HealthCheckConfig']['MeasureLatency']
self.assertEqual(False, ml)
ri = provider.health_checks[id]['HealthCheckConfig']['RequestInterval']
self.assertFalse(ml)
self.assertEquals(30, ri)
def test_health_check_gc(self):
provider, stubber = self._get_stubbed_provider()
@ -1436,6 +1474,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1449,6 +1488,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1462,6 +1502,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1639,6 +1680,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],
@ -1742,7 +1784,8 @@ class TestRoute53Provider(TestCase):
'ResourcePath': '/_dns',
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],
@ -1890,7 +1933,8 @@ class TestRoute53Provider(TestCase):
'ResourcePath': '/_dns',
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],


+ 506
- 0
tests/test_octodns_provider_ultra.py View File

@ -0,0 +1,506 @@
from mock import Mock, call
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from six import text_type
from unittest import TestCase
from json import load as json_load
from octodns.record import Record
from octodns.provider.ultra import UltraProvider, UltraNoZonesExistException
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
def _get_provider():
    '''
    Helper to return an UltraProvider after going through the
    authentication sequence against a mocked token endpoint, so no real
    network traffic happens during tests.
    '''
    with requests_mock() as mock:
        # UltraProvider authenticates during construction; satisfy the
        # token request with a canned Bearer-token response.
        mock.post('https://restapi.ultradns.com/v2/authorization/token',
                  status_code=200,
                  text='{"token type": "Bearer", "refresh_token": "abc", '
                       '"access_token":"123", "expires_in": "3600"}')
        return UltraProvider('test', 'testacct', 'user', 'pass')
class TestUltraProvider(TestCase):
# Shared fixtures for all tests in this class.
expected = Zone('unit.tests.', [])
host = 'https://restapi.ultradns.com'
# Body UltraDNS returns when a query matches no data.
empty_body = [{"errorCode": 70002, "errorMessage": "Data not found."}]

# NOTE(review): `expected` is assigned twice; this second assignment makes
# the first one above redundant — consider dropping one of them.
expected = Zone('unit.tests.', [])
source = YamlProvider('test', join(dirname(__file__), 'config'))
source.populate(expected)
def test_login(self):
    '''
    Authentication at construction time: a 401 from the token endpoint
    raises an exception whose text is 'Unauthorized'; a 200 succeeds and
    sends a form-encoded password grant.
    '''
    path = '/v2/authorization/token'

    # Bad Auth
    with requests_mock() as mock:
        mock.post('{}{}'.format(self.host, path), status_code=401,
                  text='{"errorCode": 60001}')
        with self.assertRaises(Exception) as ctx:
            UltraProvider('test', 'account', 'user', 'wrongpass')
        self.assertEquals('Unauthorized', text_type(ctx.exception))

    # Good Auth
    with requests_mock() as mock:
        # The token request must be form-encoded.
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        mock.post('{}{}'.format(self.host, path), status_code=200,
                  request_headers=headers,
                  text='{"token type": "Bearer", "refresh_token": "abc", '
                       '"access_token":"123", "expires_in": "3600"}')
        UltraProvider('test', 'account', 'user', 'rightpass')
        self.assertEquals(1, mock.call_count)

        # Verify the exact password-grant payload that was posted.
        expected_payload = "grant_type=password&username=user&"\
            "password=rightpass"
        self.assertEquals(mock.last_request.text, expected_payload)
def test_get_zones(self):
provider = _get_provider()
path = "/v2/zones"
# Test authorization issue
with requests_mock() as mock:
mock.get('{}{}'.format(self.host, path), status_code=400,
json={"errorCode": 60004,
"errorMessage": "Authorization Header required"})
with self.assertRaises(HTTPError) as ctx:
zones = provider.zones
self.assertEquals(400, ctx.exception.response.status_code)
# Test no zones exist error
with requests_mock() as mock:
mock.get('{}{}'.format(self.host, path), status_code=404,
headers={'Authorization': 'Bearer 123'},
json=self.empty_body)
zones = provider.zones
self.assertEquals(1, mock.call_count)
self.assertEquals(list(), zones)
# Reset zone cache so they are queried again
provider._zones = None
with requests_mock() as mock:
payload = {
"resultInfo": {
"totalCount": 1,
"offset": 0,
"returnedCount": 1
},
"zones": [
{
"properties": {
"name": "testzone123.com.",
"accountName": "testaccount",
"type": "PRIMARY",
"dnssecStatus": "UNSIGNED",
"status": "ACTIVE",
"owner": "user",
"resourceRecordCount": 5,
"lastModifiedDateTime": "2020-06-19T00:47Z"
}
}
]
}
mock.get('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'},
json=payload)
zones = provider.zones
self.assertEquals(1, mock.call_count)
self.assertEquals(1, len(zones))
self.assertEquals('testzone123.com.', zones[0])
# Test different paging behavior
provider._zones = None
with requests_mock() as mock:
mock.get('{}{}?limit=100&q=zone_type%3APRIMARY&offset=0'
.format(self.host, path), status_code=200,
json={"resultInfo": {"totalCount": 15,
"offset": 0,
"returnedCount": 10},
"zones": []})
mock.get('{}{}?limit=100&q=zone_type%3APRIMARY&offset=10'
.format(self.host, path), status_code=200,
json={"resultInfo": {"totalCount": 15,
"offset": 10,
"returnedCount": 5},
"zones": []})
zones = provider.zones
self.assertEquals(2, mock.call_count)
def test_request(self):
provider = _get_provider()
path = '/foo'
payload = {'a': 1}
with requests_mock() as mock:
mock.get('{}{}'.format(self.host, path), status_code=401,
headers={'Authorization': 'Bearer 123'}, json={})
with self.assertRaises(Exception) as ctx:
provider._get(path)
self.assertEquals('Unauthorized', text_type(ctx.exception))
# Test all GET patterns
with requests_mock() as mock:
mock.get('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'},
json=payload)
provider._get(path, json=payload)
mock.get('{}{}?a=1'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'})
provider._get(path, params=payload, json_response=False)
# Test all POST patterns
with requests_mock() as mock:
mock.post('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'},
json=payload)
provider._post(path, json=payload)
mock.post('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'},
text="{'a':1}")
provider._post(path, data=payload, json_response=False)
# Test all PUT patterns
with requests_mock() as mock:
mock.put('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'},
json=payload)
provider._put(path, json=payload)
# Test all DELETE patterns
with requests_mock() as mock:
mock.delete('{}{}'.format(self.host, path), status_code=200,
headers={'Authorization': 'Bearer 123'})
provider._delete(path, json_response=False)
def test_zone_records(self):
provider = _get_provider()
zone_payload = {
"resultInfo": {"totalCount": 1,
"offset": 0,
"returnedCount": 1},
"zones": [{"properties": {"name": "octodns1.test."}}]}
records_payload = {
"zoneName": "octodns1.test.",
"rrSets": [
{
"ownerName": "octodns1.test.",
"rrtype": "NS (2)",
"ttl": 86400,
"rdata": [
"ns1.octodns1.test."
]
},
{
"ownerName": "octodns1.test.",
"rrtype": "SOA (6)",
"ttl": 86400,
"rdata": [
"pdns1.ultradns.com. phelps.netflix.com. 1 10 10 10 10"
]
},
],
"resultInfo": {
"totalCount": 2,
"offset": 0,
"returnedCount": 2
}
}
zone_path = '/v2/zones'
rec_path = '/v2/zones/octodns1.test./rrsets'
with requests_mock() as mock:
mock.get('{}{}?limit=100&q=zone_type%3APRIMARY&offset=0'
.format(self.host, zone_path),
status_code=200, json=zone_payload)
mock.get('{}{}?offset=0&limit=100'.format(self.host, rec_path),
status_code=200, json=records_payload)
zone = Zone('octodns1.test.', [])
self.assertTrue(provider.zone_records(zone))
self.assertEquals(mock.call_count, 2)
# Populate the same zone again and confirm cache is hit
self.assertTrue(provider.zone_records(zone))
self.assertEquals(mock.call_count, 2)
def test_populate(self):
provider = _get_provider()
# Non-existent zone doesn't populate anything
with requests_mock() as mock:
mock.get(ANY, status_code=404, json=self.empty_body)
zone = Zone('unit.tests.', [])
provider.populate(zone)
self.assertEquals(set(), zone.records)
# re-populating the same non-existent zone uses cache and makes no
# calls
again = Zone('unit.tests.', [])
provider.populate(again)
self.assertEquals(set(), again.records)
# Test zones with data
provider._zones = None
path = '/v2/zones'
with requests_mock() as mock:
with open('tests/fixtures/ultra-zones-page-1.json') as fh:
mock.get('{}{}?limit=100&q=zone_type%3APRIMARY&offset=0'
.format(self.host, path),
status_code=200, text=fh.read())
with open('tests/fixtures/ultra-zones-page-2.json') as fh:
mock.get('{}{}?limit=100&q=zone_type%3APRIMARY&offset=10'
.format(self.host, path),
status_code=200, text=fh.read())
with open('tests/fixtures/ultra-records-page-1.json') as fh:
rec_path = '/v2/zones/octodns1.test./rrsets'
mock.get('{}{}?offset=0&limit=100'.format(self.host, rec_path),
status_code=200, text=fh.read())
with open('tests/fixtures/ultra-records-page-2.json') as fh:
rec_path = '/v2/zones/octodns1.test./rrsets'
mock.get('{}{}?offset=10&limit=100'
.format(self.host, rec_path),
status_code=200, text=fh.read())
zone = Zone('octodns1.test.', [])
self.assertTrue(provider.populate(zone))
self.assertEquals('octodns1.test.', zone.name)
self.assertEquals(11, len(zone.records))
self.assertEquals(4, mock.call_count)
def test_apply(self):
provider = _get_provider()
provider._request = Mock()
provider._request.side_effect = [
UltraNoZonesExistException('No Zones'),
None, # zone create
] + [None] * 13 # individual record creates
# non-existent zone, create everything
plan = provider.plan(self.expected)
self.assertEquals(13, len(plan.changes))
self.assertEquals(13, provider.apply(plan))
self.assertFalse(plan.exists)
provider._request.assert_has_calls([
# created the domain
call('POST', '/v2/zones', json={
'properties': {'name': 'unit.tests.',
'accountName': 'testacct',
'type': 'PRIMARY'},
'primaryCreateInfo': {'createType': 'NEW'}}),
# Validate multi-ip apex A record is correct
call('POST', '/v2/zones/unit.tests./rrsets/A/unit.tests.', json={
'ttl': 300,
'rdata': ['1.2.3.4', '1.2.3.5'],
'profile': {
'@context':
'http://schemas.ultradns.com/RDPool.jsonschema',
'order': 'FIXED',
'description': 'unit.tests.'
}
}),
# make sure semicolons are not escaped when sending data
call('POST', '/v2/zones/unit.tests./rrsets/TXT/txt.unit.tests.',
json={'ttl': 600,
'rdata': ['Bah bah black sheep',
'have you any wool.',
'v=DKIM1;k=rsa;s=email;h=sha256;'
'p=A/kinda+of/long/string+with+numb3rs']}),
], True)
# expected number of total calls
self.assertEquals(15, provider._request.call_count)
# Create sample rrset payload to attempt to alter
page1 = json_load(open('tests/fixtures/ultra-records-page-1.json'))
page2 = json_load(open('tests/fixtures/ultra-records-page-2.json'))
mock_rrsets = list()
mock_rrsets.extend(page1['rrSets'])
mock_rrsets.extend(page2['rrSets'])
# Seed a bunch of records into a zone and verify update / delete ops
provider._request.reset_mock()
provider._zones = ['octodns1.test.']
provider.zone_records = Mock(return_value=mock_rrsets)
provider._request.side_effect = [None] * 13
wanted = Zone('octodns1.test.', [])
wanted.add_record(Record.new(wanted, '', {
'ttl': 60, # Change TTL
'type': 'A',
'value': '5.6.7.8' # Change number of IPs (3 -> 1)
}))
wanted.add_record(Record.new(wanted, 'txt', {
'ttl': 3600,
'type': 'TXT',
'values': [ # Alter TXT value
"foobar",
"v=spf1 include:mail.server.net ?all"
]
}))
plan = provider.plan(wanted)
self.assertEquals(10, len(plan.changes))
self.assertEquals(10, provider.apply(plan))
self.assertTrue(plan.exists)
provider._request.assert_has_calls([
# Validate multi-ip apex A record replaced with standard A
call('PUT', '/v2/zones/octodns1.test./rrsets/A/octodns1.test.',
json={'ttl': 60,
'rdata': ['5.6.7.8']}),
# Make sure TXT value is properly updated
call('PUT',
'/v2/zones/octodns1.test./rrsets/TXT/txt.octodns1.test.',
json={'ttl': 3600,
'rdata': ["foobar",
"v=spf1 include:mail.server.net ?all"]}),
# Confirm a few of the DELETE operations properly occur
call('DELETE',
'/v2/zones/octodns1.test./rrsets/A/a.octodns1.test.',
json_response=False),
call('DELETE',
'/v2/zones/octodns1.test./rrsets/AAAA/aaaa.octodns1.test.',
json_response=False),
call('DELETE',
'/v2/zones/octodns1.test./rrsets/CAA/caa.octodns1.test.',
json_response=False),
call('DELETE',
'/v2/zones/octodns1.test./rrsets/CNAME/cname.octodns1.test.',
json_response=False),
], True)
def test_gen_data(self):
provider = _get_provider()
zone = Zone('unit.tests.', [])
for name, _type, expected_path, expected_payload, expected_record in (
# A
('a', 'A',
'/v2/zones/unit.tests./rrsets/A/a.unit.tests.',
{'ttl': 60, 'rdata': ['1.2.3.4']},
Record.new(zone, 'a',
{'ttl': 60, 'type': 'A', 'values': ['1.2.3.4']})),
('a', 'A',
'/v2/zones/unit.tests./rrsets/A/a.unit.tests.',
{'ttl': 60, 'rdata': ['1.2.3.4', '5.6.7.8'],
'profile': {'@context':
'http://schemas.ultradns.com/RDPool.jsonschema',
'order': 'FIXED',
'description': 'a.unit.tests.'}},
Record.new(zone, 'a',
{'ttl': 60, 'type': 'A',
'values': ['1.2.3.4', '5.6.7.8']})),
# AAAA
('aaaa', 'AAAA',
'/v2/zones/unit.tests./rrsets/AAAA/aaaa.unit.tests.',
{'ttl': 60, 'rdata': ['::1']},
Record.new(zone, 'aaaa',
{'ttl': 60, 'type': 'AAAA', 'values': ['::1']})),
('aaaa', 'AAAA',
'/v2/zones/unit.tests./rrsets/AAAA/aaaa.unit.tests.',
{'ttl': 60, 'rdata': ['::1', '::2'],
'profile': {'@context':
'http://schemas.ultradns.com/RDPool.jsonschema',
'order': 'FIXED',
'description': 'aaaa.unit.tests.'}},
Record.new(zone, 'aaaa',
{'ttl': 60, 'type': 'AAAA',
'values': ['::1', '::2']})),
# CAA
('caa', 'CAA',
'/v2/zones/unit.tests./rrsets/CAA/caa.unit.tests.',
{'ttl': 60, 'rdata': ['0 issue foo.com']},
Record.new(zone, 'caa',
{'ttl': 60, 'type': 'CAA',
'values':
[{'flags': 0, 'tag': 'issue', 'value': 'foo.com'}]})),
# CNAME
('cname', 'CNAME',
'/v2/zones/unit.tests./rrsets/CNAME/cname.unit.tests.',
{'ttl': 60, 'rdata': ['netflix.com.']},
Record.new(zone, 'cname',
{'ttl': 60, 'type': 'CNAME',
'value': 'netflix.com.'})),
# MX
('mx', 'MX',
'/v2/zones/unit.tests./rrsets/MX/mx.unit.tests.',
{'ttl': 60, 'rdata': ['1 mx1.unit.tests.', '1 mx2.unit.tests.']},
Record.new(zone, 'mx',
{'ttl': 60, 'type': 'MX',
'values': [{'preference': 1,
'exchange': 'mx1.unit.tests.'},
{'preference': 1,
'exchange': 'mx2.unit.tests.'}]})),
# NS
('ns', 'NS',
'/v2/zones/unit.tests./rrsets/NS/ns.unit.tests.',
{'ttl': 60, 'rdata': ['ns1.unit.tests.', 'ns2.unit.tests.']},
Record.new(zone, 'ns',
{'ttl': 60, 'type': 'NS',
'values': ['ns1.unit.tests.', 'ns2.unit.tests.']})),
# PTR
('ptr', 'PTR',
'/v2/zones/unit.tests./rrsets/PTR/ptr.unit.tests.',
{'ttl': 60, 'rdata': ['a.unit.tests.']},
Record.new(zone, 'ptr',
{'ttl': 60, 'type': 'PTR',
'value': 'a.unit.tests.'})),
# SPF
('spf', 'SPF',
'/v2/zones/unit.tests./rrsets/SPF/spf.unit.tests.',
{'ttl': 60, 'rdata': ['v=spf1 -all']},
Record.new(zone, 'spf',
{'ttl': 60, 'type': 'SPF',
'values': ['v=spf1 -all']})),
# SRV
('_srv._tcp', 'SRV',
'/v2/zones/unit.tests./rrsets/SRV/_srv._tcp.unit.tests.',
{'ttl': 60, 'rdata': ['10 20 443 target.unit.tests.']},
Record.new(zone, '_srv._tcp',
{'ttl': 60, 'type': 'SRV',
'values': [{'priority': 10,
'weight': 20,
'port': 443,
'target': 'target.unit.tests.'}]})),
# TXT
('txt', 'TXT',
'/v2/zones/unit.tests./rrsets/TXT/txt.unit.tests.',
{'ttl': 60, 'rdata': ['abc', 'def']},
Record.new(zone, 'txt',
{'ttl': 60, 'type': 'TXT',
'values': ['abc', 'def']})),
):
# Validate path and payload based on record meet expectations
path, payload = provider._gen_data(expected_record)
self.assertEqual(expected_path, path)
self.assertEqual(expected_payload, payload)
# Use generator for record and confirm the output matches
rec = provider._record_for(zone, name, _type,
expected_payload, False)
path, payload = provider._gen_data(rec)
self.assertEqual(expected_path, path)
self.assertEqual(expected_payload, payload)

+ 2
- 1
tests/test_octodns_record.py View File

@ -2167,7 +2167,8 @@ class TestRecordValidation(TestCase):
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid name'], ctx.exception.reasons)
self.assertEquals(['invalid name for SRV record'],
ctx.exception.reasons)
# missing priority
with self.assertRaises(ValidationError) as ctx:


+ 14
- 0
tests/test_octodns_source_axfr.py View File

@ -15,6 +15,7 @@ from unittest import TestCase
from octodns.source.axfr import AxfrSource, AxfrSourceZoneTransferFailed, \
ZoneFileSource, ZoneFileSourceLoadFailure
from octodns.zone import Zone
from octodns.record import ValidationError
class TestAxfrSource(TestCase):
@ -70,3 +71,16 @@ class TestZoneFileSource(TestCase):
self.source.populate(zone)
self.assertEquals('The DNS zone has no NS RRset at its origin.',
text_type(ctx.exception))
# Records are not to RFC (lenient=False)
with self.assertRaises(ValidationError) as ctx:
zone = Zone('invalid.records.', [])
self.source.populate(zone)
self.assertEquals('Invalid record _invalid.invalid.records.\n'
' - invalid name for SRV record',
text_type(ctx.exception))
# Records are not to RFC, but load anyhow (lenient=True)
invalid = Zone('invalid.records.', [])
self.source.populate(invalid, lenient=True)
self.assertEquals(12, len(invalid.records))

+ 41
- 0
tests/test_octodns_source_envvar.py View File

@ -0,0 +1,41 @@
from six import text_type
from unittest import TestCase
from unittest.mock import patch
from octodns.source.envvar import EnvVarSource
from octodns.source.envvar import EnvironmentVariableNotFoundException
from octodns.zone import Zone
class TestEnvVarSource(TestCase):
    '''
    Tests for the read-only environment-variable TXT source.
    '''

    def test_read_variable(self):
        '''_read_variable raises when unset and returns the value when set.'''
        var_name = 'OCTODNS_TEST_ENVIRONMENT_VARIABLE'
        src = EnvVarSource('testid', var_name, 'recordname', ttl=120)

        # Variable absent from the environment -> dedicated exception
        with self.assertRaises(EnvironmentVariableNotFoundException) as ctx:
            src._read_variable()
        expected_msg = 'Unknown environment variable {}'.format(var_name)
        self.assertEquals(expected_msg, text_type(ctx.exception))

        # Variable present -> its value is returned verbatim
        with patch.dict('os.environ', {var_name: 'testvalue'}):
            self.assertEquals('testvalue', src._read_variable())

    def test_populate(self):
        '''populate() injects a single TXT record built from the variable.'''
        var_name = 'TEST_VAR'
        var_value = 'somevalue'
        rec_name = 'testrecord'
        zone_name = 'unit.tests.'

        src = EnvVarSource('testid', var_name, rec_name)
        zone = Zone(zone_name, [])
        with patch.dict('os.environ', {var_name: var_value}):
            src.populate(zone)

        self.assertEquals(1, len(zone.records))
        record = next(iter(zone.records))
        self.assertEquals(rec_name, record.name)
        self.assertEquals('{}.{}'.format(rec_name, zone_name), record.fqdn)
        self.assertEquals('TXT', record._type)
        self.assertEquals(1, len(record.values))
        self.assertEquals(var_value, record.values[0])

+ 43
- 0
tests/zones/invalid.records. View File

@ -0,0 +1,43 @@
$ORIGIN invalid.records.
@ 3600 IN SOA ns1.invalid.records. root.invalid.records. (
2018071501 ; Serial
3600 ; Refresh (1 hour)
600 ; Retry (10 minutes)
604800 ; Expire (1 week)
3600 ; NXDOMAIN ttl (1 hour)
)
; NS Records
@ 3600 IN NS ns1.invalid.records.
@ 3600 IN NS ns2.invalid.records.
under 3600 IN NS ns1.invalid.records.
under 3600 IN NS ns2.invalid.records.
; SRV Records
_srv._tcp 600 IN SRV 10 20 30 foo-1.invalid.records.
_srv._tcp 600 IN SRV 10 20 30 foo-2.invalid.records.
_invalid 600 IN SRV 10 20 30 foo-3.invalid.records.
; TXT Records
txt 600 IN TXT "Bah bah black sheep"
txt 600 IN TXT "have you any wool."
txt 600 IN TXT "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs"
; MX Records
mx 300 IN MX 10 smtp-4.invalid.records.
mx 300 IN MX 20 smtp-2.invalid.records.
mx 300 IN MX 30 smtp-3.invalid.records.
mx 300 IN MX 40 smtp-1.invalid.records.
; A Records
@ 300 IN A 1.2.3.4
@ 300 IN A 1.2.3.5
www 300 IN A 2.2.3.6
wwww.sub 300 IN A 2.2.3.6
; AAAA Records
aaaa 600 IN AAAA 2601:644:500:e210:62f8:1dff:feb8:947a
; CNAME Records
cname 300 IN CNAME invalid.records.
included 300 IN CNAME invalid.records.

Loading…
Cancel
Save