
Merge pull request #312 from github/dynamic-records-dyn

Dynamic records dyn
Ross McFarland authored 7 years ago, committed by GitHub
commit 7bf4914a41
7 changed files with 2022 additions and 143 deletions
  1. octodns/provider/dyn.py  +553 -57
  2. octodns/record/__init__.py  +35 -2
  3. octodns/record/geo.py  +37 -1
  4. tests/test_octodns_provider_dyn.py  +1081 -29
  5. tests/test_octodns_provider_etc_hosts.py  +0 -1
  6. tests/test_octodns_record.py  +289 -53
  7. tests/test_octodns_record_geo.py  +27 -0

octodns/provider/dyn.py  +553 -57

@@ -7,9 +7,9 @@ from __future__ import absolute_import, division, print_function, \
from collections import defaultdict
from dyn.tm.errors import DynectGetError
from dyn.tm.services.dsf import DSFARecord, DSFAAAARecord, DSFFailoverChain, \
DSFMonitor, DSFNode, DSFRecordSet, DSFResponsePool, DSFRuleset, \
TrafficDirector, get_all_dsf_monitors, get_all_dsf_services, \
from dyn.tm.services.dsf import DSFARecord, DSFAAAARecord, DSFCNAMERecord, \
DSFFailoverChain, DSFMonitor, DSFNode, DSFRecordSet, DSFResponsePool, \
DSFRuleset, TrafficDirector, get_all_dsf_monitors, get_all_dsf_services, \
get_response_pool
from dyn.tm.session import DynectSession
from dyn.tm.zones import Zone as DynZone
@@ -18,6 +18,7 @@ from threading import Lock
from uuid import uuid4
from ..record import Record, Update
from ..record.geo import GeoCodes
from .base import BaseProvider
@@ -178,6 +179,10 @@ class _CachingDynZone(DynZone):
self.flush_cache()
def _dynamic_value_sort_key(value):
return value['value']
class DynProvider(BaseProvider):
'''
Dynect Managed DNS provider
@@ -232,6 +237,8 @@ class DynProvider(BaseProvider):
'OC': 16, # Continental Australia/Oceania
'AN': 17, # Continental Antarctica
}
# Reverse of ^
REGION_CODES_LOOKUP = {code: geo for geo, code in REGION_CODES.items()}
MONITOR_HEADER = 'User-Agent: Dyn Monitor'
MONITOR_TIMEOUT = 10
@@ -261,8 +268,7 @@ class DynProvider(BaseProvider):
@property
def SUPPORTS_DYNAMIC(self):
# TODO: dynamic
return False
return True
def _check_dyn_sess(self):
# We don't have to worry about locking for the check since the
@@ -395,61 +401,246 @@ class DynProvider(BaseProvider):
for td in get_all_dsf_services():
try:
fqdn, _type = td.label.split(':', 1)
except ValueError as e:
self.log.warn("Failed to load TrafficDirector '%s': %s",
td.label, e.message)
except ValueError:
self.log.warn("Unsupported TrafficDirector '%s'", td.label)
continue
tds[fqdn][_type] = td
self._traffic_directors = dict(tds)
return self._traffic_directors
def _populate_geo_traffic_director(self, zone, fqdn, _type, td, rulesets,
lenient):
# We start out with something that will always show change in case this
# is a busted TD. This will prevent us from creating a duplicate td.
# We'll overwrite this with real data provided we have it
geo = {}
data = {
'geo': geo,
'type': _type,
'ttl': td.ttl,
'values': ['0.0.0.0']
}
for ruleset in rulesets:
try:
record_set = ruleset.response_pools[0].rs_chains[0] \
.record_sets[0]
except IndexError:
# problems indicate a malformed ruleset, ignore it
continue
if ruleset.label.startswith('default:'):
data_for = getattr(self, '_data_for_{}'.format(_type))
data.update(data_for(_type, record_set.records))
else:
# We've stored the geo in label
try:
code, _ = ruleset.label.split(':', 1)
except ValueError:
continue
values = [r.address for r in record_set.records]
geo[code] = values
name = zone.hostname_from_fqdn(fqdn)
record = Record.new(zone, name, data, source=self)
zone.add_record(record, lenient=lenient)
return record
def _value_for_address(self, _type, record):
return {
'value': record.address,
'weight': record.weight,
}
_value_for_A = _value_for_address
_value_for_AAAA = _value_for_address
def _value_for_CNAME(self, _type, record):
return {
'value': record.cname,
'weight': record.weight,
}
def _populate_dynamic_pools(self, _type, rulesets, response_pools):
default = {}
pools = {}
data_for = getattr(self, '_data_for_{}'.format(_type))
value_for = getattr(self, '_value_for_{}'.format(_type))
# Build the list of pools, we can't just read them off of rules b/c we
# won't see unused pools there. If/when we dis-allow unused pools we
# could probably change that and avoid the refresh
for response_pool in response_pools:
# We have to refresh the response pool to have access to its
# rs_chains and thus records, yeah... :-(
# TODO: look at rulesets first b/c they won't need a refresh...
response_pool.refresh()
try:
record_set = response_pool.rs_chains[0] \
.record_sets[0]
except IndexError:
# problems indicate a malformed ruleset, ignore it
self.log.warn('_populate_dynamic_pools: '
'malformed response_pool "%s" ignoring',
response_pool.label)
continue
label = response_pool.label
if label == 'default':
# The default pool has the base record values
default = data_for(_type, record_set.records)
else:
if label not in pools:
# First time we've seen it get its data
# Note we'll have to set fallbacks as we go through rules
# b/c we can't determine them here
values = [value_for(_type, r) for r in record_set.records]
# Sort to ensure consistent ordering so we can compare them
values.sort(key=_dynamic_value_sort_key)
pools[label] = {
'values': values,
}
return default, pools
def _populate_dynamic_rules(self, rulesets, pools):
rules = []
# Build the list of rules based on the rulesets
for ruleset in rulesets:
if ruleset.label.startswith('default:'):
# Ignore the default, it's implicit in our model
continue
num_pools = len(ruleset.response_pools)
if num_pools > 0:
# Find the primary pool for this rule
pool = ruleset.response_pools[0].label
# TODO: verify pool exists
if num_pools > 1:
# We have a fallback, record it in the appropriate pool.
# Note we didn't have fallback info when we populated the
# pools above so we're filling that info in here. It's
# possible that rules will have disagreeing values for the
# fallbacks. That's annoying but a sync should fix it and
# match stuff up with the config.
fallback = ruleset.response_pools[1].label
# TODO: verify fallback exists
if fallback != 'default':
pools[pool]['fallback'] = fallback
else:
self.log.warn('_populate_dynamic_pools: '
'ruleset "%s" has no response_pools',
ruleset.label)
continue
# OK we have the rule's pool info, record it and work on the rule's
# matching criteria
rule = {
'pool': pool,
}
criteria_type = ruleset.criteria_type
if criteria_type == 'geoip':
# Geo
geo = ruleset.criteria['geoip']
geos = []
# Dyn uses the same 2-letter codes as octoDNS (except for
# continents) but it doesn't have the hierarchy, e.g. US is
# just US, not NA-US. We'll have to map these things back
for code in geo['country']:
geos.append(GeoCodes.country_to_code(code))
for code in geo['province']:
geos.append(GeoCodes.province_to_code(code.upper()))
for code in geo['region']:
geos.append(self.REGION_CODES_LOOKUP[int(code)])
geos.sort()
rule['geos'] = geos
elif criteria_type == 'always':
pass
else:
self.log.warn('_populate_dynamic_rules: '
'unsupported criteria_type "%s", ignoring',
criteria_type)
continue
rules.append(rule)
return rules
def _populate_dynamic_traffic_director(self, zone, fqdn, _type, td,
rulesets, lenient):
# We'll go ahead and grab pools too, using all will include unref'd
# pools
response_pools = td.all_response_pools
# Populate pools
default, pools = self._populate_dynamic_pools(_type, rulesets,
response_pools)
# Populate rules
rules = self._populate_dynamic_rules(rulesets, pools)
# We start out with something that will always show
# change in case this is a busted TD. This will prevent us from
# creating a duplicate td. We'll overwrite this with real data
# provided we have it
data = {
'dynamic': {
'pools': pools,
'rules': rules,
},
'type': _type,
'ttl': td.ttl,
}
# Include default's information in data
data.update(default)
name = zone.hostname_from_fqdn(fqdn)
record = Record.new(zone, name, data, source=self, lenient=lenient)
zone.add_record(record, lenient=lenient)
return record
def _is_traffic_director_dyanmic(self, td, rulesets):
for ruleset in rulesets:
try:
pieces = ruleset.label.split(':')
if len(pieces) == 2:
# It matches octoDNS's format
int(pieces[0])
# It's an integer, so probably rule_num, thus dynamic
return True
except (IndexError, ValueError):
pass
# We didn't see any rulesets that look like a dynamic record so maybe
# geo...
return False
def _populate_traffic_directors(self, zone, lenient):
self.log.debug('_populate_traffic_directors: zone=%s', zone.name)
self.log.debug('_populate_traffic_directors: zone=%s, lenient=%s',
zone.name, lenient)
td_records = set()
for fqdn, types in self.traffic_directors.items():
# TODO: skip subzones
if not fqdn.endswith(zone.name):
continue
for _type, td in types.items():
# critical to call rulesets once, each call loads them :-(
rulesets = td.rulesets
# We start out with something that will always change show
# change in case this is a busted TD. This will prevent us from
# creating a duplicate td. We'll overwrite this with real data
# provide we have it
geo = {}
data = {
'geo': geo,
'type': _type,
'ttl': td.ttl,
'values': ['0.0.0.0']
}
for ruleset in rulesets:
try:
record_set = ruleset.response_pools[0].rs_chains[0] \
.record_sets[0]
except IndexError:
# problems indicate a malformed ruleset, ignore it
continue
_type = record_set.rdata_class
if ruleset.label.startswith('default:'):
data_for = getattr(self, '_data_for_{}'.format(_type))
data.update(data_for(_type, record_set.records))
else:
# We've stored the geo in label
try:
code, _ = ruleset.label.split(':', 1)
except ValueError:
continue
values = [r.address for r in record_set.records]
geo[code] = values
name = zone.hostname_from_fqdn(fqdn)
record = Record.new(zone, name, data, source=self)
zone.add_record(record, lenient=lenient)
if self._is_traffic_director_dyanmic(td, rulesets):
record = \
self._populate_dynamic_traffic_director(zone, fqdn,
_type, td,
rulesets,
lenient)
else:
record = \
self._populate_geo_traffic_director(zone, fqdn, _type,
td, rulesets,
lenient)
td_records.add(record)
return td_records
@@ -659,8 +850,8 @@ class DynProvider(BaseProvider):
self._traffic_director_monitors[label] = monitor
return monitor
def _find_or_create_pool(self, td, pools, label, _type, values,
monitor_id=None):
def _find_or_create_geo_pool(self, td, pools, label, _type, values,
monitor_id=None):
for pool in pools:
if pool.label != label:
continue
@@ -680,9 +871,78 @@ class DynProvider(BaseProvider):
chain = DSFFailoverChain(label, record_sets=[record_set])
pool = DSFResponsePool(label, rs_chains=[chain])
pool.create(td)
# We need to store the newly created pool in the pools list since the
# caller won't know if it was newly created or not. This will allow us
# to find this pool again if another rule references it and avoid
# creating duplicates
pools.append(pool)
return pool
def _mod_rulesets(self, td, change):
def _dynamic_records_for_A(self, values, record_extras):
return [DSFARecord(v['value'], weight=v.get('weight', 1),
**record_extras)
for v in values]
def _dynamic_records_for_AAAA(self, values, record_extras):
return [DSFAAAARecord(v['value'], weight=v.get('weight', 1),
**record_extras)
for v in values]
def _dynamic_records_for_CNAME(self, values, record_extras):
return [DSFCNAMERecord(v['value'], weight=v.get('weight', 1),
**record_extras)
for v in values]
def _find_or_create_dynamic_pool(self, td, pools, label, _type, values,
monitor_id=None, record_extras={}):
# Sort the values for consistent ordering so that we can compare
values = sorted(values, key=_dynamic_value_sort_key)
# Ensure that weight is included and if not use the default
values = map(lambda v: {
'value': v['value'],
'weight': v.get('weight', 1),
}, values)
# Walk through our existing pools looking for a match we can use
for pool in pools:
# It must have the same label
if pool.label != label:
continue
try:
records = pool.rs_chains[0].record_sets[0].records
except IndexError:
# No values, can't match
continue
# And the (sorted) values must match once converted for comparison
# purposes
value_for = getattr(self, '_value_for_{}'.format(_type))
record_values = [value_for(_type, r) for r in records]
if record_values == values:
# it's a match
return pool
# We don't have this pool and thus need to create it
records_for = getattr(self, '_dynamic_records_for_{}'.format(_type))
records = records_for(values, record_extras)
record_set = DSFRecordSet(_type, label,
serve_count=min(len(records), 2),
records=records, dsf_monitor_id=monitor_id)
chain = DSFFailoverChain(label, record_sets=[record_set])
pool = DSFResponsePool(label, rs_chains=[chain])
pool.create(td)
# We need to store the newly created pool in the pools list since the
# caller won't know if it was newly created or not. This will allow us
# to find this pool again if another rule references it and avoid
# creating duplicates
pools.append(pool)
return pool
def _mod_geo_rulesets(self, td, change):
new = change.new
# Response Pools
@@ -732,14 +992,14 @@ class DynProvider(BaseProvider):
int(r._ordering)
for r in existing_rulesets
] + [-1]) + 1
self.log.debug('_mod_rulesets: insert_at=%d', insert_at)
self.log.debug('_mod_geo_rulesets: insert_at=%d', insert_at)
# add the default
label = 'default:{}'.format(uuid4().hex)
ruleset = DSFRuleset(label, 'always', [])
ruleset.create(td, index=insert_at)
pool = self._find_or_create_pool(td, pools, 'default', new._type,
new.values)
pool = self._find_or_create_geo_pool(td, pools, 'default', new._type,
new.values)
# There's no way in the client lib to create a ruleset with an existing
# pool (ref'd by id) so we have to do this round-a-bout.
active_pools = {
@@ -773,8 +1033,8 @@ class DynProvider(BaseProvider):
ruleset.create(td, index=insert_at)
first = geo.values[0]
pool = self._find_or_create_pool(td, pools, first, new._type,
geo.values, monitor_id)
pool = self._find_or_create_geo_pool(td, pools, first, new._type,
geo.values, monitor_id)
active_pools[geo.code] = pool.response_pool_id
ruleset.add_response_pool(pool.response_pool_id)
@@ -811,7 +1071,7 @@ class DynProvider(BaseProvider):
node = DSFNode(new.zone.name, fqdn)
td = TrafficDirector(label, ttl=new.ttl, nodes=[node], publish='Y')
self.log.debug('_mod_geo_Create: td=%s', td.service_id)
self._mod_rulesets(td, change)
self._mod_geo_rulesets(td, change)
self.traffic_directors[fqdn] = {
_type: td
}
@@ -832,7 +1092,7 @@ class DynProvider(BaseProvider):
self._mod_geo_Create(dyn_zone, change)
self._mod_Delete(dyn_zone, change)
return
self._mod_rulesets(td, change)
self._mod_geo_rulesets(td, change)
def _mod_geo_Delete(self, dyn_zone, change):
existing = change.existing
@@ -841,6 +1101,237 @@ class DynProvider(BaseProvider):
fqdn_tds[_type].delete()
del fqdn_tds[_type]
def _mod_dynamic_rulesets(self, td, change):
new = change.new
# TODO: make sure we can update TTLs
if td.ttl != new.ttl:
td.ttl = new.ttl
# Get existing pools. This should be simple, but it's not b/c the dyn
# api is a POS. We need all response pools so we can GC and check to
# make sure that what we're after doesn't already exist.
# td.all_response_pools just returns thin objects that don't include
# their rs_chains (and children down to actual records.) We could just
# foreach over those turning them into full DSFResponsePool objects
# with get_response_pool, but that'd be N round-trips. We can avoid
# those round trips in cases where the pools are in use in rules where
# they're already full objects.
# First up populate all the pools we have under rules, the _ prevents a
# td.refresh we don't need :-( seriously?
existing_rulesets = td._rulesets
pools = {}
for ruleset in existing_rulesets:
for pool in ruleset.response_pools:
pools[pool.response_pool_id] = pool
# Reverse sort the existing_rulesets by _ordering so that we'll remove
# them in that order later, this will ensure that we remove the old
# default before any of the old geo rules preventing it from catching
# everything.
existing_rulesets.sort(key=lambda r: r._ordering, reverse=True)
# Add in any pools that aren't currently referenced by rules
for pool in td.all_response_pools:
rpid = pool.response_pool_id
if rpid not in pools:
# we want this one, but it's thin, inflate it
pools[rpid] = get_response_pool(rpid, td)
# now that we have full objects for the complete set of existing pools,
# a list will be more useful
pools = pools.values()
# Rulesets
# We need to make sure and insert the new rules after any existing
# rules so they won't take effect before we've had a chance to add
# response pools to them. I've tried both publish=False (which is
# completely broken in the client) and creating the rulesets with
# response_pool_ids neither of which appear to work from the client
# library. If there are no existing rulesets fallback to 0
insert_at = max([
int(r._ordering)
for r in existing_rulesets
] + [-1]) + 1
self.log.debug('_mod_dynamic_rulesets: insert_at=%d', insert_at)
# Add the base record values as the ultimate/unhealthchecked default
label = 'default:{}'.format(uuid4().hex)
ruleset = DSFRuleset(label, 'always', [])
ruleset.create(td, index=insert_at)
# If/when we go beyond A, AAAA, and CNAME this will have to get
# more intelligent, probably a weighted_values method on Record objects
# or something like that?
try:
values = new.values
except AttributeError:
values = [new.value]
values = [{
'value': v,
'weight': 1,
} for v in values]
# For these defaults we need to set them to always be served and to
# ignore any health checking (since they won't have one)
pool = self._find_or_create_dynamic_pool(td, pools, 'default',
new._type, values,
record_extras={
'automation': 'manual',
'eligible': True,
})
# There's no way in the client lib to create a ruleset with an existing
# pool (ref'd by id) so we have to do this round-a-bout.
active_pools = {
# TODO: disallow default as a pool id
'default': pool.response_pool_id
}
ruleset.add_response_pool(pool.response_pool_id)
# Get our monitor
monitor_id = self._traffic_director_monitor(new).dsf_monitor_id
# Make sure we have all the pools we're going to need
for _id, pool in sorted(new.dynamic.pools.items()):
values = [{
'weight': v.get('weight', 1),
'value': v['value'],
} for v in pool.data['values']]
pool = self._find_or_create_dynamic_pool(td, pools, _id,
new._type, values,
monitor_id)
active_pools[_id] = pool.response_pool_id
# Run through and configure our rules
for rule_num, rule in enumerate(reversed(new.dynamic.rules)):
criteria = defaultdict(lambda: defaultdict(list))
criteria_type = 'always'
try:
geos = rule.data['geos']
criteria_type = 'geoip'
except KeyError:
geos = []
for geo in geos:
geo = GeoCodes.parse(geo)
if geo['province_code']:
criteria['geoip']['province'] \
.append(geo['province_code'].lower())
elif geo['country_code']:
criteria['geoip']['country'] \
.append(geo['country_code'])
else:
criteria['geoip']['region'] \
.append(self.REGION_CODES[geo['continent_code']])
label = '{}:{}'.format(rule_num, uuid4().hex)
ruleset = DSFRuleset(label, criteria_type, [], criteria)
# Something you have to call create on, others the constructor does it
ruleset.create(td, index=insert_at)
# Add the primary pool for this rule
rule_pool = rule.data['pool']
ruleset.add_response_pool(active_pools[rule_pool])
# OK, we have the rule and its primary pool setup, now look to see
# if there's a fallback chain that needs to be configured
fallback = new.dynamic.pools[rule_pool].data.get('fallback', None)
seen = set([rule_pool])
while fallback and fallback not in seen:
seen.add(fallback)
# looking at client lib code, index > exists appends
ruleset.add_response_pool(active_pools[fallback], index=999)
fallback = new.dynamic.pools[fallback].data.get('fallback',
None)
if fallback is not None:
# If we're out of the while and fallback is not None that means
# there was a loop. This generally shouldn't happen since
# Record validations test for it, but this is a
# belt-and-suspenders setup. Excepting here would put things
# into a partially configured state which would be bad. We'll
# just break at the point where the loop was going to happen
# and log about it. Note that any time we hit this we're likely
# to hit it multiple times as we configure the other pools
self.log.warn('_mod_dynamic_rulesets: loop detected in '
'fallback chain, fallback=%s, seen=%s', fallback,
seen)
# and always add default as the last
ruleset.add_response_pool(active_pools['default'], index=999)
# we're done with active_pools as a lookup, convert it in to a set of
# the ids in use
active_pools = set(active_pools.values())
# Clean up unused response_pools
for pool in pools:
if pool.response_pool_id in active_pools:
continue
pool.delete()
# Clean out the old rulesets
for ruleset in existing_rulesets:
ruleset.delete()
def _mod_dynamic_Create(self, dyn_zone, change):
new = change.new
fqdn = new.fqdn
_type = new._type
# Create a new traffic director
label = '{}:{}'.format(fqdn, _type)
node = DSFNode(new.zone.name, fqdn)
td = TrafficDirector(label, ttl=new.ttl, nodes=[node], publish='Y')
self.log.debug('_mod_dynamic_Create: td=%s', td.service_id)
# Sync up its pools & rules
self._mod_dynamic_rulesets(td, change)
# Store it for future reference
self.traffic_directors[fqdn] = {
_type: td
}
def _mod_dynamic_Update(self, dyn_zone, change):
new = change.new
if not new.dynamic:
if new.geo:
# New record is a geo record
self.log.info('_mod_dynamic_Update: %s to geo', new.fqdn)
# Convert the TD over to a geo and we're done
self._mod_geo_Update(dyn_zone, change)
else:
# New record doesn't have dynamic, we're going from a TD to a
# regular record
self.log.info('_mod_dynamic_Update: %s to plain', new.fqdn)
# Create the regular record
self._mod_Create(dyn_zone, change)
# Delete the dynamic
self._mod_dynamic_Delete(dyn_zone, change)
return
try:
# We'll be dynamic going forward, see if we have one already
td = self.traffic_directors[new.fqdn][new._type]
if change.existing.geo:
self.log.info('_mod_dynamic_Update: %s from geo', new.fqdn)
else:
self.log.debug('_mod_dynamic_Update: %s existing', new.fqdn)
# If we're here we do, we'll just update it down below
except KeyError:
# There's no td, this is actually a create, we must be going from a
# non-dynamic to dynamic record
# First create the dynamic record
self.log.info('_mod_dynamic_Update: %s from regular', new.fqdn)
self._mod_dynamic_Create(dyn_zone, change)
# From a generic so remove the old generic
self._mod_Delete(dyn_zone, change)
return
# If we're here it's actually an update, sync up rules
self._mod_dynamic_rulesets(td, change)
def _mod_dynamic_Delete(self, dyn_zone, change):
existing = change.existing
fqdn_tds = self.traffic_directors[existing.fqdn]
_type = existing._type
fqdn_tds[_type].delete()
del fqdn_tds[_type]
def _mod_Create(self, dyn_zone, change):
new = change.new
kwargs_for = getattr(self, '_kwargs_for_{}'.format(new._type))
@@ -867,8 +1358,13 @@ class DynProvider(BaseProvider):
unhandled_changes = []
for c in changes:
# we only mess with changes that have geo info somewhere
if getattr(c.new, 'geo', False) or getattr(c.existing, 'geo',
False):
if getattr(c.new, 'dynamic', False) or getattr(c.existing,
'dynamic', False):
mod = getattr(self, '_mod_dynamic_{}'
.format(c.__class__.__name__))
mod(dyn_zone, c)
elif getattr(c.new, 'geo', False) or getattr(c.existing, 'geo',
False):
mod = getattr(self, '_mod_geo_{}'.format(c.__class__.__name__))
mod(dyn_zone, c)
else:
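
Aside on the convention the detection code above relies on: the provider names each TrafficDirector 'fqdn:type' and encodes rule metadata in ruleset labels — 'default:<uuid>' for the catch-all ruleset, '<rule_num>:<uuid>' for dynamic rules, and '<geo_code>:<uuid>' for legacy geo rules — so a TD is treated as dynamic when any ruleset label's prefix parses as an integer. A standalone sketch of that check (the example labels are hypothetical, not taken from the PR):

def looks_dynamic(ruleset_labels):
    # Mirrors _is_traffic_director_dyanmic: a '<int>:<uuid>' label means an
    # octoDNS-numbered dynamic rule; geo rules use '<geo_code>:<uuid>' instead.
    for label in ruleset_labels:
        pieces = label.split(':')
        if len(pieces) == 2:
            try:
                int(pieces[0])
                return True
            except ValueError:
                pass
    return False

print(looks_dynamic(['default:6f1a', '0:8c2b']))      # True  -> dynamic record
print(looks_dynamic(['default:6f1a', 'NA-US:9d3c']))  # False -> legacy geo record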


octodns/record/__init__.py  +35 -2

@@ -268,6 +268,7 @@ class _ValuesMixin(object):
values = data['values']
except KeyError:
values = [data['value']]
# TODO: should we natsort values?
self.values = sorted(self._value_type.process(values))
def changes(self, other, target):
@@ -384,12 +385,27 @@ class _DynamicPool(object):
def __init__(self, _id, data):
self._id = _id
self.data = data
values = [
{
'value': d['value'],
'weight': d.get('weight', 1),
} for d in data['values']
]
values.sort(key=lambda d: d['value'])
fallback = data.get('fallback', None)
self.data = {
'fallback': fallback if fallback != 'default' else None,
'values': values,
}
def _data(self):
return self.data
def __eq__(self, other):
if not isinstance(other, _DynamicPool):
return False
return self.data == other.data
def __ne__(self, other):
@@ -403,12 +419,23 @@ class _DynamicRule(object):
def __init__(self, i, data):
self.i = i
self.data = data
self.data = {}
try:
self.data['pool'] = data['pool']
except KeyError:
pass
try:
self.data['geos'] = sorted(data['geos'])
except KeyError:
pass
def _data(self):
return self.data
def __eq__(self, other):
if not isinstance(other, _DynamicRule):
return False
return self.data == other.data
def __ne__(self, other):
@@ -437,6 +464,8 @@ class _Dynamic(object):
}
def __eq__(self, other):
if not isinstance(other, _Dynamic):
return False
ret = self.pools == other.pools and self.rules == other.rules
return ret
@@ -457,6 +486,8 @@ class _DynamicMixin(object):
if 'dynamic' not in data:
return reasons
elif 'geo' in data:
reasons.append('"dynamic" record with "geo" content')
try:
pools = data['dynamic']['pools']
@@ -533,6 +564,8 @@ class _DynamicMixin(object):
else:
seen_default = False
# TODO: don't allow 'default' as a pool name, reserved
# TODO: warn or error on unused pools?
for i, rule in enumerate(rules):
rule_num = i + 1
try:
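
For reference, the _DynamicPool normalization introduced above gives pool data a canonical shape: every value gets a weight (defaulting to 1), values are sorted by 'value', and a fallback of 'default' collapses to None. A small illustrative sketch (the pool id and addresses are made up):

from octodns.record import _DynamicPool

pool = _DynamicPool('two', {
    'values': [
        {'value': '5.5.5.5'},
        {'value': '4.4.4.4', 'weight': 2},
    ],
    'fallback': 'default',
})
# pool.data is normalized to:
# {'fallback': None,
#  'values': [{'value': '4.4.4.4', 'weight': 2},
#             {'value': '5.5.5.5', 'weight': 1}]}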


octodns/record/geo.py  +37 -1

@@ -2,11 +2,13 @@
#
#
from logging import getLogger
from .geo_data import geo_data
class GeoCodes(object):
__COUNTRIES = None
log = getLogger('GeoCodes')
@classmethod
def validate(cls, code, prefix):
@@ -33,3 +35,37 @@ class GeoCodes(object):
reasons.append('{}unknown province code "{}"'.format(prefix, code))
return reasons
@classmethod
def parse(cls, code):
pieces = code.split('-')
try:
country_code = pieces[1]
except IndexError:
country_code = None
try:
province_code = pieces[2]
except IndexError:
province_code = None
return {
'continent_code': pieces[0],
'country_code': country_code,
'province_code': province_code,
}
@classmethod
def country_to_code(cls, country):
for continent, countries in geo_data.items():
if country in countries:
return '{}-{}'.format(continent, country)
cls.log.warn('country_to_code: unrecognized country "%s"', country)
return
@classmethod
def province_to_code(cls, province):
# We get to cheat on this one since we only support provinces in NA-US
if province not in geo_data['NA']['US']['provinces']:
cls.log.warn('country_to_code: unrecognized province "%s"',
province)
return
return 'NA-US-{}'.format(province)
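
The new GeoCodes helpers give the Dyn provider a two-way mapping between octoDNS's hierarchical geo codes and Dyn's flat country/province/region criteria; the tests added below pin down the behaviour. For example:

from octodns.record.geo import GeoCodes

GeoCodes.parse('NA-US-CA')
# {'continent_code': 'NA', 'country_code': 'US', 'province_code': 'CA'}
GeoCodes.country_to_code('US')    # 'NA-US'
GeoCodes.province_to_code('OR')   # 'NA-US-OR'
GeoCodes.country_to_code('XX')    # None, and logs a warning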

tests/test_octodns_provider_dyn.py  +1081 -29
File diff suppressed because it is too large


tests/test_octodns_provider_etc_hosts.py  +0 -1

@@ -170,7 +170,6 @@ class TestEtcHostsProvider(TestCase):
with open(hosts_file) as fh:
data = fh.read()
print(data)
self.assertTrue('# loop.unit.tests -> start.unit.tests '
'**loop**' in data)
self.assertTrue('# middle.unit.tests -> loop.unit.tests '


tests/test_octodns_record.py  +289 -53

@@ -10,7 +10,7 @@ from unittest import TestCase
from octodns.record import ARecord, AaaaRecord, AliasRecord, CaaRecord, \
CnameRecord, Create, Delete, GeoValue, MxRecord, NaptrRecord, \
NaptrValue, NsRecord, Record, SshfpRecord, SpfRecord, SrvRecord, \
TxtRecord, Update, ValidationError
TxtRecord, Update, ValidationError, _Dynamic, _DynamicPool, _DynamicRule
from octodns.zone import Zone
from helpers import DynamicProvider, GeoProvider, SimpleProvider
@@ -1906,10 +1906,11 @@ class TestDynamicRecords(TestCase):
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}, {
'value': '4.4.4.4',
}],
},
'three': {
@@ -1948,10 +1949,24 @@
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals(a_data['dynamic']['pools']['one'], pools['one'].data)
self.assertEquals(a_data['dynamic']['pools']['two'], pools['two'].data)
self.assertEquals(a_data['dynamic']['pools']['three'],
pools['three'].data)
self.assertEquals({
'value': '3.3.3.3',
'weight': 1,
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '4.4.4.4',
'weight': 1,
}, {
'value': '5.5.5.5',
'weight': 1,
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '4.4.4.4',
}, {
'weight': 12,
'value': '5.5.5.5',
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
@@ -1994,10 +2009,11 @@
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
}],
},
'three': {
@@ -2036,12 +2052,24 @@
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals(aaaa_data['dynamic']['pools']['one'],
pools['one'].data)
self.assertEquals(aaaa_data['dynamic']['pools']['two'],
pools['two'].data)
self.assertEquals(aaaa_data['dynamic']['pools']['three'],
pools['three'].data)
self.assertEquals({
'value': '2601:642:500:e210:62f8:1dff:feb8:9473',
'weight': 1,
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
'weight': 1,
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
'weight': 1,
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '2601:642:500:e210:62f8:1dff:feb8:9476',
}, {
'weight': 12,
'value': '2601:642:500:e210:62f8:1dff:feb8:9477',
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
@@ -2094,12 +2122,21 @@
pools = dynamic.pools
self.assertTrue(pools)
self.assertEquals(cname_data['dynamic']['pools']['one'],
pools['one'].data)
self.assertEquals(cname_data['dynamic']['pools']['two'],
pools['two'].data)
self.assertEquals(cname_data['dynamic']['pools']['three'],
pools['three'].data)
self.assertEquals({
'value': 'one.cname.target.',
'weight': 1,
}, pools['one'].data['values'][0])
self.assertEquals({
'value': 'two.cname.target.',
'weight': 1,
}, pools['two'].data['values'][0])
self.assertEquals([{
'value': 'three-1.cname.target.',
'weight': 12,
}, {
'value': 'three-2.cname.target.',
'weight': 32,
}], pools['three'].data['values'])
rules = dynamic.rules
self.assertTrue(rules)
@@ -2906,9 +2943,10 @@
a_data = {
'dynamic': {
'rules': [{
'pools': {
1: 'one',
}
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
@@ -2928,7 +2966,19 @@
a_data = {
'dynamic': {
'pools': {
'one': '1.1.1.1',
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
},
'ttl': 60,
@@ -2941,11 +2991,82 @@
a = Record.new(self.zone, 'bad', a_data, lenient=True)
self.assertEquals({
'pools': {
'one': '1.1.1.1',
'one': {
'fallback': None,
'values': [{
'value': '3.3.3.3',
'weight': 1,
}]
},
'two': {
'fallback': None,
'values': [{
'value': '4.4.4.4',
'weight': 1,
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
'rules': [],
}, a._data()['dynamic'])
# rule without pool
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
'rules': [{
'geos': ['EU'],
'pool': 'two',
}, {
}],
},
'ttl': 60,
'type': 'A',
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
a = Record.new(self.zone, 'bad', a_data, lenient=True)
self.assertEquals({
'pools': {
'one': {
'fallback': None,
'values': [{
'value': '3.3.3.3',
'weight': 1,
}]
},
'two': {
'fallback': None,
'values': [{
'value': '4.4.4.4',
'weight': 1,
}, {
'value': '5.5.5.5',
'weight': 2,
}]
},
},
'rules': a_data['dynamic']['rules'],
}, a._data()['dynamic'])
def test_dynamic_changes(self):
simple = SimpleProvider()
dynamic = DynamicProvider()
@@ -2953,17 +3074,24 @@
a_data = {
'dynamic': {
'pools': {
'one': '3.3.3.3',
'two': [
'4.4.4.4',
'5.5.5.5',
],
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'pools': {
100: 'one',
200: 'two',
}
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
@@ -2978,17 +3106,25 @@
b_data = {
'dynamic': {
'pools': {
'one': '3.3.3.5',
'two': [
'4.4.4.4',
'5.5.5.5',
],
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
'weight': 2,
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'pools': {
100: 'one',
200: 'two',
}
'geos': ['EU'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
@@ -3002,17 +3138,24 @@
c_data = {
'dynamic': {
'pools': {
'one': '3.3.3.3',
'two': [
'4.4.4.4',
'5.5.5.5',
],
'one': {
'values': [{
'value': '3.3.3.3',
}]
},
'two': {
'values': [{
'value': '4.4.4.4',
}, {
'value': '5.5.5.5',
}]
},
},
'rules': [{
'pools': {
100: 'one',
300: 'two',
}
'geos': ['NA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'ttl': 60,
@@ -3052,3 +3195,96 @@
self.assertEquals(a.dynamic.rules, a.dynamic.rules)
self.assertEquals(a.dynamic.rules[0], a.dynamic.rules[0])
self.assertNotEquals(a.dynamic.rules[0], c.dynamic.rules[0])
def test_dynamic_and_geo_validation(self):
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '3.3.3.3',
}],
},
'two': {
# Testing out of order value sorting here
'values': [{
'value': '5.5.5.5',
}, {
'value': '4.4.4.4',
}],
},
'three': {
'values': [{
'weight': 10,
'value': '4.4.4.4',
}, {
'weight': 12,
'value': '5.5.5.5',
}],
},
},
'rules': [{
'geos': ['AF', 'EU'],
'pool': 'three',
}, {
'geos': ['NA-US-CA'],
'pool': 'two',
}, {
'pool': 'one',
}],
},
'geo': {
'NA': ['1.2.3.5'],
'NA-US': ['1.2.3.5', '1.2.3.6']
},
'type': 'A',
'ttl': 60,
'values': [
'1.1.1.1',
'2.2.2.2',
],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertEquals(['"dynamic" record with "geo" content'],
ctx.exception.reasons)
def test_dynamic_eqs(self):
pool_one = _DynamicPool('one', {
'values': [{
'value': '1.2.3.4',
}],
})
pool_two = _DynamicPool('two', {
'values': [{
'value': '1.2.3.5',
}],
})
self.assertEquals(pool_one, pool_one)
self.assertNotEquals(pool_one, pool_two)
self.assertNotEquals(pool_one, 42)
pools = {
'one': pool_one,
'two': pool_two,
}
rule_one = _DynamicRule(0, {
'pool': 'one',
})
rule_two = _DynamicRule(1, {
'pool': 'two',
})
self.assertEquals(rule_one, rule_one)
self.assertNotEquals(rule_one, rule_two)
self.assertNotEquals(rule_one, 42)
rules = [
rule_one,
rule_two,
]
dynamic = _Dynamic(pools, rules)
other = _Dynamic({}, [])
self.assertEquals(dynamic, dynamic)
self.assertNotEquals(dynamic, other)
self.assertNotEquals(dynamic, 42)
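
Taken together, the tests above fix the shape of a dynamic record as this PR expects it: named pools of weighted values (with optional fallback chains) plus ordered rules that map geos to a pool, with the record's plain values acting as the ultimate default. A minimal, hand-written example of building one (zone name and addresses are illustrative):

from octodns.record import Record
from octodns.zone import Zone

zone = Zone('unit.tests.', [])
record = Record.new(zone, 'www', {
    'type': 'A',
    'ttl': 60,
    # base values, served when no rule/pool applies or is healthy
    'values': ['1.1.1.1', '2.2.2.2'],
    'dynamic': {
        'pools': {
            'one': {'values': [{'value': '3.3.3.3'}]},
            'two': {
                'values': [{'value': '4.4.4.4'},
                           {'value': '5.5.5.5', 'weight': 2}],
                'fallback': 'one',
            },
        },
        'rules': [
            {'geos': ['EU'], 'pool': 'two'},
            {'pool': 'one'},  # catch-all rule, no geos
        ],
    },
})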

tests/test_octodns_record_geo.py  +27 -0

@@ -51,3 +51,30 @@ class TestRecordGeoCodes(TestCase):
# Bad province code, good continent and country
self.assertEquals(['xyz unknown province code "NA-US-XX"'],
GeoCodes.validate('NA-US-XX', prefix))
def test_parse(self):
self.assertEquals({
'continent_code': 'NA',
'country_code': None,
'province_code': None,
}, GeoCodes.parse('NA'))
self.assertEquals({
'continent_code': 'NA',
'country_code': 'US',
'province_code': None,
}, GeoCodes.parse('NA-US'))
self.assertEquals({
'continent_code': 'NA',
'country_code': 'US',
'province_code': 'CA',
}, GeoCodes.parse('NA-US-CA'))
def test_country_to_code(self):
self.assertEquals('NA-US', GeoCodes.country_to_code('US'))
self.assertEquals('EU-GB', GeoCodes.country_to_code('GB'))
self.assertFalse(GeoCodes.country_to_code('XX'))
def test_province_to_code(self):
self.assertEquals('NA-US-OR', GeoCodes.province_to_code('OR'))
self.assertEquals('NA-US-KY', GeoCodes.province_to_code('KY'))
self.assertFalse(GeoCodes.province_to_code('XX'))
