
Merge branch 'master' of https://github.com/github/octodns into ultradns_support

pull/579/head
Phelps Williams committed 6 years ago
commit ea5a5f4ab3
13 changed files with 559 additions and 162 deletions
  1. +2 -0 docs/dynamic_records.md
  2. +2 -0 docs/geo_records.md
  3. +15 -4 octodns/manager.py
  4. +58 -39 octodns/provider/ns1.py
  5. +28 -6 octodns/provider/route53.py
  6. +1 -1 octodns/record/__init__.py
  7. +4 -4 requirements.txt
  8. +23 -0 tests/test_octodns_manager.py
  9. +303 -87 tests/test_octodns_provider_ns1.py
  10. +64 -20 tests/test_octodns_provider_route53.py
  11. +2 -1 tests/test_octodns_record.py
  12. +14 -0 tests/test_octodns_source_axfr.py
  13. +43 -0 tests/zones/invalid.records.

+2 -0 docs/dynamic_records.md

@ -110,6 +110,7 @@ test:
| Key | Description | Default |
|--|--|--|
| measure_latency | Show latency in AWS console | true |
| request_interval | Healthcheck interval [10\|30] seconds | 10 |
```yaml
@ -123,4 +124,5 @@ test:
route53:
healthcheck:
measure_latency: false
request_interval: 30
```

+2 -0 docs/geo_records.md

@ -86,6 +86,7 @@ test:
| Key | Description | Default |
|--|--|--|
| measure_latency | Show latency in AWS console | true |
| request_interval | Healthcheck interval [10\|30] seconds | 10 |
```yaml
---
@ -98,4 +99,5 @@ test:
route53:
healthcheck:
measure_latency: false
request_interval: 30
```

+15 -4 octodns/manager.py

@ -8,6 +8,7 @@ from __future__ import absolute_import, division, print_function, \
from concurrent.futures import ThreadPoolExecutor
from importlib import import_module
from os import environ
from six import text_type
import logging
from .provider.base import BaseProvider
@ -221,13 +222,21 @@ class Manager(object):
self.log.debug('configured_sub_zones: subs=%s', sub_zone_names)
return set(sub_zone_names)
def _populate_and_plan(self, zone_name, sources, targets):
def _populate_and_plan(self, zone_name, sources, targets, lenient=False):
self.log.debug('sync: populating, zone=%s', zone_name)
self.log.debug('sync: populating, zone=%s, lenient=%s',
zone_name, lenient)
zone = Zone(zone_name,
sub_zones=self.configured_sub_zones(zone_name))
for source in sources:
source.populate(zone)
try:
source.populate(zone, lenient=lenient)
except TypeError as e:
if "keyword argument 'lenient'" not in text_type(e):
raise
self.log.warn('provider %s does not accept lenient param',
source.__class__.__name__)
source.populate(zone)
self.log.debug('sync: planning, zone=%s', zone_name)
plans = []
@ -259,6 +268,7 @@ class Manager(object):
futures = []
for zone_name, config in zones:
self.log.info('sync: zone=%s', zone_name)
lenient = config.get('lenient', False)
try:
sources = config['sources']
except KeyError:
@ -308,7 +318,8 @@ class Manager(object):
.format(zone_name, target))
futures.append(self._executor.submit(self._populate_and_plan,
zone_name, sources, targets))
zone_name, sources,
targets, lenient=lenient))
# Wait on all results and unpack/flatten them into a list of target &
# plan pairs.

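The zone-level `lenient` flag above comes straight from the manager config. A minimal sketch of a zone entry that opts in (the zone name and provider ids are illustrative, not from this diff):

```yaml
zones:
  unit.tests.:
    # read by Manager.sync via config.get('lenient', False) and passed
    # through to each source.populate(zone, lenient=...)
    lenient: true
    sources:
      - config
    targets:
      - route53
```

Sources whose populate() predates the parameter are handled by the TypeError fallback above, so older providers keep working unchanged.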

+58 -39 octodns/provider/ns1.py

@ -237,7 +237,6 @@ class Ns1Provider(BaseProvider):
'NS', 'PTR', 'SPF', 'SRV', 'TXT'))
ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found'
CATCHALL_PREFIX = 'catchall__'
def _update_filter(self, filter, with_disabled):
if with_disabled:
@ -455,6 +454,16 @@ class Ns1Provider(BaseProvider):
data['geo'] = geo
return data
def _parse_dynamic_pool_name(self, pool_name):
if pool_name.startswith('catchall__'):
# Special case for the old-style catchall prefix
return pool_name[10:]
try:
pool_name, _ = pool_name.rsplit('__', 1)
except ValueError:
pass
return pool_name
def _data_for_dynamic_A(self, _type, record):
# First make sure we have the expected filters config
if not self._valid_filter_config(record['filters'], record['domain']):
@ -472,9 +481,8 @@ class Ns1Provider(BaseProvider):
for answer in record['answers']:
# region (group name in the UI) is the pool name
pool_name = answer['region']
# Get the actual pool name from the constructed pool name in case
# of the catchall
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
pool = pools[pool_name]
meta = answer['meta']
@ -501,15 +509,24 @@ class Ns1Provider(BaseProvider):
# tied to pools on the NS1 side, e.g. we can only have 1 rule per pool,
# that may eventually run into problems, but I don't have any use-case
# examples currently where it would
rules = []
rules = {}
for pool_name, region in sorted(record['regions'].items()):
# Rules that refer to the catchall pool would have the
# CATCHALL_PREFIX in the pool name. Strip the prefix to get back
# the pool name as in the config
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Get the actual pool name by removing the type
pool_name = self._parse_dynamic_pool_name(pool_name)
meta = region['meta']
notes = self._parse_notes(meta.get('note', ''))
rule_order = notes['rule-order']
try:
rule = rules[rule_order]
except KeyError:
rule = {
'pool': pool_name,
'_order': rule_order,
}
rules[rule_order] = rule
# The group notes field in the UI is a `note` on the region here,
# that's where we can find our pool's fallback.
if 'fallback' in notes:
@ -560,17 +577,15 @@ class Ns1Provider(BaseProvider):
for state in meta.get('us_state', []):
geos.add('NA-US-{}'.format(state))
rule = {
'pool': pool_name,
'_order': notes['rule-order'],
}
if geos:
rule['geos'] = sorted(geos)
rules.append(rule)
# There are geos, combine them with any existing geos for this
# pool and record the sorted unique set of them
rule['geos'] = sorted(set(rule.get('geos', [])) | geos)
# Order and convert to a list
default = sorted(default)
# Order
# Convert to list and order
rules = list(rules.values())
rules.sort(key=lambda r: (r['_order'], r['pool']))
return {
@ -1050,29 +1065,34 @@ class Ns1Provider(BaseProvider):
meta = {
'note': self._encode_notes(notes),
}
if georegion:
meta['georegion'] = sorted(georegion)
if country:
meta['country'] = sorted(country)
if us_state:
meta['us_state'] = sorted(us_state)
georegion_meta = dict(meta)
georegion_meta['georegion'] = sorted(georegion)
regions['{}__georegion'.format(pool_name)] = {
'meta': georegion_meta,
}
if country or us_state:
# If there's country and/or states it's a country pool;
# countries and states can coexist as they're handled by the
# same step in the filter chain (countries and georegions
# cannot as they're separate stages and run the risk of
# eliminating all options)
country_state_meta = dict(meta)
if country:
country_state_meta['country'] = sorted(country)
if us_state:
country_state_meta['us_state'] = sorted(us_state)
regions['{}__country'.format(pool_name)] = {
'meta': country_state_meta,
}
if not georegion and not country and not us_state:
# This is the catchall pool. Modify the pool name in the record
# being pushed
# NS1 regions are indexed by pool names. Any reuse of pool
# names in the rules will result in overwriting of the pool.
# Reuse of pools is in general disallowed, except for the case of
# the catchall pool - to allow legitimate use cases.
# The pool name renaming is done to accommodate such reuse.
# (We expect only one catchall per record. Any associated
# validation is expected to be covered under record validation)
pool_name = '{}{}'.format(self.CATCHALL_PREFIX, pool_name)
regions[pool_name] = {
'meta': meta,
}
# If there's no targeting it's a catchall
regions['{}__catchall'.format(pool_name)] = {
'meta': meta,
}
existing_monitors = self._monitors_for(record)
active_monitors = set()
@ -1102,15 +1122,14 @@ class Ns1Provider(BaseProvider):
# Build our list of answers
# The regions dictionary built above already has the required pool
# names. Iterate over them and add answers.
# In the case of the catchall, original pool name can be obtained
# by stripping the CATCHALL_PREFIX from the pool name
answers = []
for pool_name in sorted(regions.keys()):
priority = 1
# Dynamic/health checked
pool_label = pool_name
pool_name = pool_name.replace(self.CATCHALL_PREFIX, '')
# Remove the pool type from the end of the name
pool_name = self._parse_dynamic_pool_name(pool_name)
self._add_answers_for_pool(answers, default_answers, pool_name,
pool_label, pool_answers, pools,
priority)

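Taken together, the ns1.py changes replace the single `catchall__<pool>` prefix with type-suffixed region names: a pool may now surface as `<pool>__georegion`, `<pool>__country`, and `<pool>__catchall`, and `_parse_dynamic_pool_name` maps any of them (plus the legacy prefix) back to the octoDNS pool name. A self-contained sketch of that round-trip, mirroring the method above rather than importing the provider:

```python
def parse_dynamic_pool_name(pool_name):
    # legacy catchall prefix, e.g. 'catchall__iad' -> 'iad'
    if pool_name.startswith('catchall__'):
        return pool_name[len('catchall__'):]
    # new-style type suffix, e.g. 'iad__georegion' -> 'iad'; names
    # without '__' raise ValueError on unpack and pass through as-is
    try:
        pool_name, _ = pool_name.rsplit('__', 1)
    except ValueError:
        pass
    return pool_name


assert parse_dynamic_pool_name('catchall__iad') == 'iad'
assert parse_dynamic_pool_name('iad__catchall') == 'iad'
assert parse_dynamic_pool_name('lhr__georegion') == 'lhr'
assert parse_dynamic_pool_name('iad') == 'iad'
```

Suffixing also means a pool reused by both a targeted rule and the catchall no longer collides in NS1's regions dict, which is what the removed renaming comment was working around.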

+28 -6 octodns/provider/route53.py

@ -512,6 +512,10 @@ class _Route53GeoRecord(_Route53Record):
self.values)
class Route53ProviderException(Exception):
pass
def _mod_keyer(mod):
rrset = mod['ResourceRecordSet']
@ -1031,8 +1035,20 @@ class Route53Provider(BaseProvider):
.get('healthcheck', {}) \
.get('measure_latency', True)
def _healthcheck_request_interval(self, record):
interval = record._octodns.get('route53', {}) \
.get('healthcheck', {}) \
.get('request_interval', 10)
if interval in [10, 30]:
return interval
else:
raise Route53ProviderException(
'route53.healthcheck.request_interval '
'parameter must be either 10 or 30.')
def _health_check_equivalent(self, host, path, protocol, port,
measure_latency, health_check, value=None):
measure_latency, request_interval,
health_check, value=None):
config = health_check['HealthCheckConfig']
# So interestingly Route53 normalizes IPAddress which will cause us to
@ -1050,9 +1066,10 @@ class Route53Provider(BaseProvider):
None)
resource_path = config.get('ResourcePath', None)
return host == fully_qualified_domain_name and \
path == resource_path and protocol == config['Type'] \
and port == config['Port'] and \
path == resource_path and protocol == config['Type'] and \
port == config['Port'] and \
measure_latency == config['MeasureLatency'] and \
request_interval == config['RequestInterval'] and \
value == config_ip_address
def get_health_check_id(self, record, value, create):
@ -1077,6 +1094,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
healthcheck_interval = self._healthcheck_request_interval(record)
# we're looking for a healthcheck with the current version & our record
# type, we'll ignore anything else
@ -1091,6 +1109,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
healthcheck_interval,
health_check,
value=value):
# this is the health check we're looking for
@ -1108,7 +1127,7 @@ class Route53Provider(BaseProvider):
'FailureThreshold': 6,
'MeasureLatency': healthcheck_latency,
'Port': healthcheck_port,
'RequestInterval': 10,
'RequestInterval': healthcheck_interval,
'Type': healthcheck_protocol,
}
if healthcheck_protocol != 'TCP':
@ -1143,9 +1162,10 @@ class Route53Provider(BaseProvider):
self._health_checks[id] = health_check
self.log.info('get_health_check_id: created id=%s, host=%s, '
'path=%s, protocol=%s, port=%d, measure_latency=%r, '
'value=%s', id, healthcheck_host, healthcheck_path,
'request_interval=%d, value=%s',
id, healthcheck_host, healthcheck_path,
healthcheck_protocol, healthcheck_port,
healthcheck_latency, value)
healthcheck_latency, healthcheck_interval, value)
return id
def _gc_health_checks(self, record, new):
@ -1238,6 +1258,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol = record.healthcheck_protocol
healthcheck_port = record.healthcheck_port
healthcheck_latency = self._healthcheck_measure_latency(record)
healthcheck_interval = self._healthcheck_request_interval(record)
try:
health_check_id = rrset['HealthCheckId']
@ -1249,6 +1270,7 @@ class Route53Provider(BaseProvider):
healthcheck_protocol,
healthcheck_port,
healthcheck_latency,
healthcheck_interval,
health_check):
# it has the right health check
return False

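The new `request_interval` knob is read from record-level provider config and validated before any health check is created; anything other than 10 or 30 raises `Route53ProviderException`. A sketch of a record that would trip the validation (record name, TTL, and value are placeholders):

```yaml
a:
  type: A
  ttl: 61
  value: 1.2.3.4
  octodns:
    route53:
      healthcheck:
        # only 10 or 30 are accepted; 20 raises Route53ProviderException
        request_interval: 20
```

The interval is also now part of `_health_check_equivalent`, so changing it on an existing record creates a new health check rather than silently reusing the old one.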

+1 -1 octodns/record/__init__.py

@ -1215,7 +1215,7 @@ class SrvRecord(_ValuesMixin, Record):
def validate(cls, name, fqdn, data):
reasons = []
if not cls._name_re.match(name):
reasons.append('invalid name')
reasons.append('invalid name for SRV record')
reasons.extend(super(SrvRecord, cls).validate(name, fqdn, data))
return reasons


+4 -4 requirements.txt

@ -1,8 +1,8 @@
PyYaml==5.3.1
azure-common==1.1.25
azure-mgmt-dns==3.0.0
boto3==1.13.19
botocore==1.16.19
boto3==1.14.14
botocore==1.17.14
dnspython==1.16.0
docutils==0.16
dyn==1.8.1
@ -12,14 +12,14 @@ google-cloud-core==1.3.0
google-cloud-dns==0.32.0
ipaddress==1.0.23
jmespath==0.10.0
msrestazure==0.6.3
msrestazure==0.6.4
natsort==6.2.1
ns1-python==0.16.0
ovh==0.5.0
pycountry-convert==0.7.2
pycountry==19.8.18
python-dateutil==2.8.1
requests==2.23.0
requests==2.24.0
s3transfer==0.3.3
setuptools==44.1.1
six==1.15.0


+23 -0 tests/test_octodns_manager.py

@ -278,6 +278,29 @@ class TestManager(TestCase):
.validate_configs()
self.assertTrue('unknown source' in text_type(ctx.exception))
def test_populate_lenient_fallback(self):
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
# Only allow a target that doesn't exist
manager = Manager(get_config_filename('simple.yaml'))
class NoLenient(SimpleProvider):
def populate(self, zone, source=False):
pass
# This should be ok, we'll fall back to not passing it
manager._populate_and_plan('unit.tests.', [NoLenient()], [])
class NoZone(SimpleProvider):
def populate(self, lenient=False):
pass
# This will blow up, the TypeError isn't about the lenient param
with self.assertRaises(TypeError):
manager._populate_and_plan('unit.tests.', [NoZone()], [])
class TestMainThreadExecutor(TestCase):


+303 -87 tests/test_octodns_provider_ns1.py

@ -528,52 +528,55 @@ class TestNs1Provider(TestCase):
class TestNs1ProviderDynamic(TestCase):
zone = Zone('unit.tests.', [])
record = Record.new(zone, '', {
'dynamic': {
'pools': {
'lhr': {
'fallback': 'iad',
'values': [{
'value': '3.4.5.6',
}],
},
'iad': {
'values': [{
'value': '1.2.3.4',
}, {
'value': '2.3.4.5',
}],
def record(self):
# return a new object each time so we can mess with it without causing
# problems from test to test
return Record.new(self.zone, '', {
'dynamic': {
'pools': {
'lhr': {
'fallback': 'iad',
'values': [{
'value': '3.4.5.6',
}],
},
'iad': {
'values': [{
'value': '1.2.3.4',
}, {
'value': '2.3.4.5',
}],
},
},
'rules': [{
'geos': [
'AF',
'EU-GB',
'NA-US-FL'
],
'pool': 'lhr',
}, {
'geos': [
'AF-ZW',
],
'pool': 'iad',
}, {
'pool': 'iad',
}],
},
'rules': [{
'geos': [
'AF',
'EU-GB',
'NA-US-FL'
],
'pool': 'lhr',
}, {
'geos': [
'AF-ZW',
],
'pool': 'iad',
}, {
'pool': 'iad',
}],
},
'octodns': {
'healthcheck': {
'host': 'send.me',
'path': '/_ping',
'port': 80,
'protocol': 'HTTP',
}
},
'ttl': 32,
'type': 'A',
'value': '1.2.3.4',
'meta': {},
})
'octodns': {
'healthcheck': {
'host': 'send.me',
'path': '/_ping',
'port': 80,
'protocol': 'HTTP',
}
},
'ttl': 32,
'type': 'A',
'value': '1.2.3.4',
'meta': {},
})
def test_notes(self):
provider = Ns1Provider('test', 'api-key')
@ -636,7 +639,7 @@ class TestNs1ProviderDynamic(TestCase):
self.assertEquals({
'1.2.3.4': monitor_one,
'2.3.4.5': monitor_four,
}, provider._monitors_for(self.record))
}, provider._monitors_for(self.record()))
def test_uuid(self):
# Just a smoke test/for coverage
@ -707,18 +710,19 @@ class TestNs1ProviderDynamic(TestCase):
provider = Ns1Provider('test', 'api-key')
value = '3.4.5.6'
monitor = provider._monitor_gen(self.record, value)
record = self.record()
monitor = provider._monitor_gen(record, value)
self.assertEquals(value, monitor['config']['host'])
self.assertTrue('\\nHost: send.me\\r' in monitor['config']['send'])
self.assertFalse(monitor['config']['ssl'])
self.assertEquals('host:unit.tests type:A', monitor['notes'])
self.record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(self.record, value)
record._octodns['healthcheck']['protocol'] = 'HTTPS'
monitor = provider._monitor_gen(record, value)
self.assertTrue(monitor['config']['ssl'])
self.record._octodns['healthcheck']['protocol'] = 'TCP'
monitor = provider._monitor_gen(self.record, value)
record._octodns['healthcheck']['protocol'] = 'TCP'
monitor = provider._monitor_gen(record, value)
# No http send done
self.assertFalse('send' in monitor['config'])
# No http response expected
@ -790,10 +794,11 @@ class TestNs1ProviderDynamic(TestCase):
monitor_gen_mock.side_effect = [{'key': 'value'}]
monitor_create_mock.side_effect = [('mon-id', 'feed-id')]
value = '1.2.3.4'
monitor_id, feed_id = provider._monitor_sync(self.record, value, None)
record = self.record()
monitor_id, feed_id = provider._monitor_sync(record, value, None)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
monitor_gen_mock.assert_has_calls([call(self.record, value)])
monitor_gen_mock.assert_has_calls([call(record, value)])
monitor_create_mock.assert_has_calls([call({'key': 'value'})])
monitors_update_mock.assert_not_called()
feed_create_mock.assert_not_called()
@ -809,7 +814,7 @@ class TestNs1ProviderDynamic(TestCase):
'name': 'monitor name',
}
monitor_gen_mock.side_effect = [monitor]
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
@ -830,7 +835,7 @@ class TestNs1ProviderDynamic(TestCase):
}
monitor_gen_mock.side_effect = [monitor]
feed_create_mock.side_effect = ['feed-id2']
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id2', monitor_id)
self.assertEquals('feed-id2', feed_id)
@ -853,7 +858,7 @@ class TestNs1ProviderDynamic(TestCase):
'other': 'thing',
}
monitor_gen_mock.side_effect = [gened]
monitor_id, feed_id = provider._monitor_sync(self.record, value,
monitor_id, feed_id = provider._monitor_sync(record, value,
monitor)
self.assertEquals('mon-id', monitor_id)
self.assertEquals('feed-id', feed_id)
@ -883,8 +888,9 @@ class TestNs1ProviderDynamic(TestCase):
monitors_delete_mock.reset_mock()
notifylists_delete_mock.reset_mock()
monitors_for_mock.side_effect = [{}]
provider._monitors_gc(self.record)
monitors_for_mock.assert_has_calls([call(self.record)])
record = self.record()
provider._monitors_gc(record)
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_not_called()
notifylists_delete_mock.assert_not_called()
@ -900,8 +906,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id',
}
}]
provider._monitors_gc(self.record)
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record)
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')])
monitors_delete_mock.assert_has_calls([call('mon-id')])
notifylists_delete_mock.assert_has_calls([call('nl-id')])
@ -917,8 +923,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id',
}
}]
provider._monitors_gc(self.record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_not_called()
notifylists_delete_mock.assert_not_called()
@ -939,8 +945,8 @@ class TestNs1ProviderDynamic(TestCase):
'notify_list': 'nl-id2',
},
}]
provider._monitors_gc(self.record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(self.record)])
provider._monitors_gc(record, {'mon-id'})
monitors_for_mock.assert_has_calls([call(record)])
datafeed_delete_mock.assert_not_called()
monitors_delete_mock.assert_has_calls([call('mon-id2')])
notifylists_delete_mock.assert_has_calls([call('nl-id2')])
@ -972,18 +978,202 @@ class TestNs1ProviderDynamic(TestCase):
('mid-3', 'fid-3'),
]
rule0 = self.record.data['dynamic']['rules'][0]
rule1 = self.record.data['dynamic']['rules'][1]
rule0_saved_geos = rule0['geos']
rule1_saved_geos = rule1['geos']
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
rule1['geos'] = ['NA']
ret, _ = provider._params_for_A(self.record)
ret, monitor_ids = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_REGION(provider,
True))
rule0['geos'] = rule0_saved_geos
rule1['geos'] = rule1_saved_geos
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__georegion': {
'meta': {
'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
'note': 'rule-order:1'
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
self.assertEquals({'mid-1', 'mid-2', 'mid-3'}, monitor_ids)
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_state_only(self, monitors_for_mock,
monitor_sync_mock):
provider = Ns1Provider('test', 'api-key')
# pre-fill caches to avoid extraneous calls (things we're testing
# elsewhere)
provider._client._datasource_id = 'foo'
provider._client._feeds_for_monitors = {
'mon-id': 'feed-id',
}
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
'3.4.5.6': 'mid-3',
}]
monitor_sync_mock.side_effect = [
('mid-1', 'fid-1'),
('mid-2', 'fid-2'),
('mid-3', 'fid-3'),
]
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU']
rule1['geos'] = ['NA-US-CA']
ret, _ = provider._params_for_A(record)
self.assertEquals(10, len(ret['answers']))
exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
True)
self.assertEquals(ret['filters'], exp)
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__country': {
'meta': {
'note': 'rule-order:1',
'us_state': ['CA']
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_continent_and_countries(self,
monitors_for_mock,
monitor_sync_mock):
provider = Ns1Provider('test', 'api-key')
# pre-fill caches to avoid extraneous calls (things we're testing
# elsewhere)
provider._client._datasource_id = 'foo'
provider._client._feeds_for_monitors = {
'mon-id': 'feed-id',
}
# provider._params_for_A() calls provider._monitors_for() and
# provider._monitor_sync(). Mock their return values so that we don't
# make NS1 API calls during tests
monitors_for_mock.reset_mock()
monitor_sync_mock.reset_mock()
monitors_for_mock.side_effect = [{
'3.4.5.6': 'mid-3',
}]
monitor_sync_mock.side_effect = [
('mid-1', 'fid-1'),
('mid-2', 'fid-2'),
('mid-3', 'fid-3'),
]
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule1 = record.data['dynamic']['rules'][1]
rule0['geos'] = ['AF', 'EU', 'NA-US-CA']
rule1['geos'] = ['NA', 'NA-US']
ret, _ = provider._params_for_A(record)
self.assertEquals(17, len(ret['answers']))
# Deeply check the answers we have here
# group the answers based on where they came from
notes = defaultdict(list)
for answer in ret['answers']:
notes[answer['meta']['note']].append(answer)
# Remove the meta and region part since it'll vary based on the
# exact pool, that'll let us == them down below
del answer['meta']
del answer['region']
# Expected groups. iad has three occurrences in here: a country and
# a georegion that were split out based on targeting both a continent
# and a country. It finally has a catchall. Those are examples of the
# two ways pools get expanded.
#
# lhr splits in two as well, with a georegion (for its continents)
# and a country (for its state). iad appearing in both a targeted
# rule and the catchall is an example of a repeated target pool
# across rules (only allowed when the 2nd is a catchall.)
self.assertEquals(['from:--default--', 'from:iad__catchall',
'from:iad__country', 'from:iad__georegion',
'from:lhr__country', 'from:lhr__georegion'],
sorted(notes.keys()))
# All the iad's should match (after meta and region were removed)
self.assertEquals(notes['from:iad__catchall'],
notes['from:iad__country'])
self.assertEquals(notes['from:iad__catchall'],
notes['from:iad__georegion'])
# The lhrs should match each other too
self.assertEquals(notes['from:lhr__georegion'],
notes['from:lhr__country'])
# We have both country and region filter chain entries
exp = Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(provider,
True)
self.assertEquals(ret['filters'], exp)
# and our region details match the expected behaviors/targeting
self.assertEquals({
'iad__catchall': {
'meta': {
'note': 'rule-order:2'
}
},
'iad__country': {
'meta': {
'country': ['US'],
'note': 'rule-order:1'
}
},
'iad__georegion': {
'meta': {
'georegion': ['US-CENTRAL', 'US-EAST', 'US-WEST'],
'note': 'rule-order:1'
}
},
'lhr__country': {
'meta': {
'note': 'fallback:iad rule-order:0',
'us_state': ['CA']
}
},
'lhr__georegion': {
'meta': {
'georegion': ['AFRICA', 'EUROPE'],
'note': 'fallback:iad rule-order:0'
}
}
}, ret['regions'])
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
@ -1014,18 +1204,21 @@ class TestNs1ProviderDynamic(TestCase):
# Set geos to 'OC' in rules[0] (pool - 'lhr')
# Check returned dict has list of countries under 'OC'
rule0 = self.record.data['dynamic']['rules'][0]
saved_geos = rule0['geos']
record = self.record()
rule0 = record.data['dynamic']['rules'][0]
rule0['geos'] = ['OC']
ret, _ = provider._params_for_A(self.record)
self.assertEquals(set(ret['regions']['lhr']['meta']['country']),
ret, _ = provider._params_for_A(record)
# Make sure the country list expanded into all the OC countries
got = set(ret['regions']['lhr__country']['meta']['country'])
self.assertEquals(got,
Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC'])
# When rules has 'OC', it is converted to list of countries in the
# params. Look if the returned filters is the filter chain with country
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_COUNTRY(provider,
True))
rule0['geos'] = saved_geos
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
@ -1051,19 +1244,20 @@ class TestNs1ProviderDynamic(TestCase):
]
# This indirectly calls into _params_for_dynamic_A and tests the
# handling to get there
ret, _ = provider._params_for_A(self.record)
record = self.record()
ret, _ = provider._params_for_A(record)
# Given that self.record has both country and region in the rules,
# Given that record has both country and region in the rules,
# the returned filter chain should be one with region and country
self.assertEquals(ret['filters'],
Ns1Provider._FILTER_CHAIN_WITH_REGION_AND_COUNTRY(
provider, True))
monitors_for_mock.assert_has_calls([call(self.record)])
monitors_for_mock.assert_has_calls([call(record)])
monitors_sync_mock.assert_has_calls([
call(self.record, '1.2.3.4', None),
call(self.record, '2.3.4.5', None),
call(self.record, '3.4.5.6', 'mid-3'),
call(record, '1.2.3.4', None),
call(record, '2.3.4.5', None),
call(record, '3.4.5.6', 'mid-3'),
])
record = Record.new(self.zone, 'geo', {
@ -1111,13 +1305,13 @@ class TestNs1ProviderDynamic(TestCase):
# Test out a small, but realistic setup that covers all the options
# We have country and region in the test config
filters = provider._get_updated_filter_chain(True, True)
catchall_pool_name = '{}{}'.format(provider.CATCHALL_PREFIX, 'iad')
catchall_pool_name = 'iad__catchall'
ns1_record = {
'answers': [{
'answer': ['3.4.5.6'],
'meta': {
'priority': 1,
'note': 'from:lhr',
'note': 'from:lhr__country',
},
'region': 'lhr',
}, {
@ -1169,14 +1363,24 @@ class TestNs1ProviderDynamic(TestCase):
'domain': 'unit.tests',
'filters': filters,
'regions': {
'lhr': {
# lhr will use the new-split style names (and that will require
# combining in the code to produce the expected answer)
'lhr__georegion': {
'meta': {
'note': 'rule-order:1 fallback:iad',
'country': ['CA'],
'georegion': ['AFRICA'],
},
},
'lhr__country': {
'meta': {
'note': 'rule-order:1 fallback:iad',
'country': ['CA'],
'us_state': ['OR'],
},
},
# iad will use the old style "plain" region naming. We won't
# see mixed names like this in practice, but this should
# exercise both paths
'iad': {
'meta': {
'note': 'rule-order:2',
@ -1240,16 +1444,28 @@ class TestNs1ProviderDynamic(TestCase):
data2 = provider._data_for_A('A', ns1_record)
self.assertEquals(data, data2)
# Same answer if we have an old-style catchall name
old_style_catchall_pool_name = 'catchall__iad'
ns1_record['answers'][-2]['region'] = old_style_catchall_pool_name
ns1_record['answers'][-1]['region'] = old_style_catchall_pool_name
ns1_record['regions'][old_style_catchall_pool_name] = \
ns1_record['regions'][catchall_pool_name]
del ns1_record['regions'][catchall_pool_name]
data3 = provider._data_for_dynamic_A('A', ns1_record)
self.assertEquals(data, data3)
# Oceania test cases
# 1. Full list of countries should return 'OC' in geos
oc_countries = Ns1Provider._CONTINENT_TO_LIST_OF_COUNTRIES['OC']
ns1_record['regions']['lhr']['meta']['country'] = list(oc_countries)
ns1_record['regions']['lhr__country']['meta']['country'] = \
list(oc_countries)
data3 = provider._data_for_A('A', ns1_record)
self.assertTrue('OC' in data3['dynamic']['rules'][0]['geos'])
# 2. Partial list of countries should return just those
partial_oc_cntry_list = list(oc_countries)[:5]
ns1_record['regions']['lhr']['meta']['country'] = partial_oc_cntry_list
ns1_record['regions']['lhr__country']['meta']['country'] = \
partial_oc_cntry_list
data4 = provider._data_for_A('A', ns1_record)
for c in partial_oc_cntry_list:
self.assertTrue(


+64 -20 tests/test_octodns_provider_route53.py

@ -12,9 +12,9 @@ from unittest import TestCase
from mock import patch
from octodns.record import Create, Delete, Record, Update
from octodns.provider.route53 import Route53Provider, _Route53GeoDefault, \
_Route53DynamicValue, _Route53GeoRecord, _Route53Record, _mod_keyer, \
_octal_replace
from octodns.provider.route53 import Route53Provider, _Route53DynamicValue, \
_Route53GeoDefault, _Route53GeoRecord, Route53ProviderException, \
_Route53Record, _mod_keyer, _octal_replace
from octodns.zone import Zone
from helpers import GeoProvider
@ -304,6 +304,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -317,6 +318,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 42,
}, {
@ -330,6 +332,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -343,6 +346,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -357,6 +361,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1023,6 +1028,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1036,6 +1042,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1059,6 +1066,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1108,6 +1116,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1121,6 +1130,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1242,9 +1252,9 @@ class TestRoute53Provider(TestCase):
self.assertEquals('42', id)
stubber.assert_no_pending_responses()
def test_health_check_measure_latency(self):
def test_health_check_provider_options(self):
provider, stubber = self._get_stubbed_provider()
record_true = Record.new(self.expected, 'a', {
record = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
@ -1253,23 +1263,28 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': True
'measure_latency': True,
'request_interval': 10,
}
}
}
})
measure_latency = provider._healthcheck_measure_latency(record_true)
self.assertTrue(measure_latency)
latency = provider._healthcheck_measure_latency(record)
interval = provider._healthcheck_request_interval(record)
self.assertTrue(latency)
self.assertEquals(10, interval)
record_default = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
})
measure_latency = provider._healthcheck_measure_latency(record_default)
self.assertTrue(measure_latency)
latency = provider._healthcheck_measure_latency(record_default)
interval = provider._healthcheck_request_interval(record_default)
self.assertTrue(latency)
self.assertEquals(10, interval)
record_false = Record.new(self.expected, 'a', {
record = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
@ -1278,15 +1293,35 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': False
'measure_latency': False,
'request_interval': 30,
}
}
}
})
measure_latency = provider._healthcheck_measure_latency(record_false)
self.assertFalse(measure_latency)
latency = provider._healthcheck_measure_latency(record)
interval = provider._healthcheck_request_interval(record)
self.assertFalse(latency)
self.assertEquals(30, interval)
def test_create_health_checks_measure_latency(self):
record_invalid = Record.new(self.expected, 'a', {
'ttl': 61,
'type': 'A',
'value': '1.2.3.4',
'octodns': {
'healthcheck': {
},
'route53': {
'healthcheck': {
'request_interval': 20,
}
}
}
})
with self.assertRaises(Route53ProviderException):
provider._healthcheck_request_interval(record_invalid)
def test_create_health_checks_provider_options(self):
provider, stubber = self._get_stubbed_provider()
health_check_config = {
@ -1296,7 +1331,7 @@ class TestRoute53Provider(TestCase):
'IPAddress': '1.2.3.4',
'MeasureLatency': False,
'Port': 443,
'RequestInterval': 10,
'RequestInterval': 30,
'ResourcePath': '/_dns',
'Type': 'HTTPS'
}
@ -1335,7 +1370,8 @@ class TestRoute53Provider(TestCase):
},
'route53': {
'healthcheck': {
'measure_latency': False
'measure_latency': False,
'request_interval': 30
}
}
}
@ -1344,7 +1380,9 @@ class TestRoute53Provider(TestCase):
value = record.geo['AF'].values[0]
id = provider.get_health_check_id(record, value, True)
ml = provider.health_checks[id]['HealthCheckConfig']['MeasureLatency']
self.assertEqual(False, ml)
ri = provider.health_checks[id]['HealthCheckConfig']['RequestInterval']
self.assertFalse(ml)
self.assertEquals(30, ri)
def test_health_check_gc(self):
provider, stubber = self._get_stubbed_provider()
@ -1436,6 +1474,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1449,6 +1488,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}, {
@ -1462,6 +1502,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}]
@ -1639,6 +1680,7 @@ class TestRoute53Provider(TestCase):
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],
@ -1742,7 +1784,8 @@ class TestRoute53Provider(TestCase):
'ResourcePath': '/_dns',
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],
@ -1890,7 +1933,8 @@ class TestRoute53Provider(TestCase):
'ResourcePath': '/_dns',
'Type': 'HTTPS',
'Port': 443,
'MeasureLatency': True
'MeasureLatency': True,
'RequestInterval': 10,
},
'HealthCheckVersion': 2,
}],


+2 -1 tests/test_octodns_record.py

@ -2167,7 +2167,8 @@ class TestRecordValidation(TestCase):
'target': 'foo.bar.baz.'
}
})
self.assertEquals(['invalid name'], ctx.exception.reasons)
self.assertEquals(['invalid name for SRV record'],
ctx.exception.reasons)
# missing priority
with self.assertRaises(ValidationError) as ctx:


+14 -0 tests/test_octodns_source_axfr.py

@ -15,6 +15,7 @@ from unittest import TestCase
from octodns.source.axfr import AxfrSource, AxfrSourceZoneTransferFailed, \
ZoneFileSource, ZoneFileSourceLoadFailure
from octodns.zone import Zone
from octodns.record import ValidationError
class TestAxfrSource(TestCase):
@ -70,3 +71,16 @@ class TestZoneFileSource(TestCase):
self.source.populate(zone)
self.assertEquals('The DNS zone has no NS RRset at its origin.',
text_type(ctx.exception))
# Records that don't conform to the RFC (lenient=False)
with self.assertRaises(ValidationError) as ctx:
zone = Zone('invalid.records.', [])
self.source.populate(zone)
self.assertEquals('Invalid record _invalid.invalid.records.\n'
' - invalid name for SRV record',
text_type(ctx.exception))
# Records don't conform to the RFC, but load anyhow (lenient=True)
invalid = Zone('invalid.records.', [])
self.source.populate(invalid, lenient=True)
self.assertEquals(12, len(invalid.records))

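The new assertions exercise the lenient path end to end against the `invalid.records.` fixture below. A minimal usage sketch, assuming `ZoneFileSource`'s id/directory constructor arguments as used in these tests:

```python
from octodns.source.axfr import ZoneFileSource
from octodns.zone import Zone

# assumes zone files live under ./tests/zones, one file per zone,
# named after the zone origin (e.g. 'invalid.records.')
source = ZoneFileSource('zonefiles', './tests/zones')

zone = Zone('invalid.records.', [])
# a strict populate raises ValidationError on the bad '_invalid' SRV
# name; lenient=True loads all 12 records anyway
source.populate(zone, lenient=True)
print(len(zone.records))  # 12
```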
+43 -0 tests/zones/invalid.records.

@ -0,0 +1,43 @@
$ORIGIN invalid.records.
@ 3600 IN SOA ns1.invalid.records. root.invalid.records. (
2018071501 ; Serial
3600 ; Refresh (1 hour)
600 ; Retry (10 minutes)
604800 ; Expire (1 week)
3600 ; NXDOMAIN ttl (1 hour)
)
; NS Records
@ 3600 IN NS ns1.invalid.records.
@ 3600 IN NS ns2.invalid.records.
under 3600 IN NS ns1.invalid.records.
under 3600 IN NS ns2.invalid.records.
; SRV Records
_srv._tcp 600 IN SRV 10 20 30 foo-1.invalid.records.
_srv._tcp 600 IN SRV 10 20 30 foo-2.invalid.records.
_invalid 600 IN SRV 10 20 30 foo-3.invalid.records.
; TXT Records
txt 600 IN TXT "Bah bah black sheep"
txt 600 IN TXT "have you any wool."
txt 600 IN TXT "v=DKIM1;k=rsa;s=email;h=sha256;p=A/kinda+of/long/string+with+numb3rs"
; MX Records
mx 300 IN MX 10 smtp-4.invalid.records.
mx 300 IN MX 20 smtp-2.invalid.records.
mx 300 IN MX 30 smtp-3.invalid.records.
mx 300 IN MX 40 smtp-1.invalid.records.
; A Records
@ 300 IN A 1.2.3.4
@ 300 IN A 1.2.3.5
www 300 IN A 2.2.3.6
wwww.sub 300 IN A 2.2.3.6
; AAAA Records
aaaa 600 IN AAAA 2601:644:500:e210:62f8:1dff:feb8:947a
; CNAME Records
cname 300 IN CNAME invalid.records.
included 300 IN CNAME invalid.records.
