diff --git a/CHANGELOG.md b/CHANGELOG.md index 11d984c..c3eaae6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,17 @@ +## v0.9.1 - UNRELEASED + +### NOTICE + +Using this version on existing records with `geo` will result in +recreating all health checks. This process has been tested pretty thoroughly to +try and ensure a seamless upgrade without any traffic shifting around. It's +probably best to take extra care when updating and to try and make sure that +all health checks are passing before the first sync with `--doit`. See +[#67](https://github.com/github/octodns/pull/67) for more information. + +* Major update to geo healthchecks to allow configuring host (header), path, + protocol, and port [#67](https://github.com/github/octodns/pull/67) + ## v0.9.0 - 2018-03-26 - Way too long since we last met * Way way way too much to list out here, shouldn't have waited so long diff --git a/octodns/provider/base.py b/octodns/provider/base.py index b0d6ea8..ada0c7a 100644 --- a/octodns/provider/base.py +++ b/octodns/provider/base.py @@ -34,7 +34,7 @@ class BaseProvider(BaseSource): ''' return True - def _extra_changes(self, existing, changes): + def _extra_changes(self, existing, desired, changes): ''' An opportunity for providers to add extra changes to the plan that are necessary to update ancillary record data or configure the zone. E.g.
@@ -64,7 +64,8 @@ class BaseProvider(BaseSource): self.log.info('plan: filtered out %s changes', before - after) # allow the provider to add extra changes it needs - extra = self._extra_changes(existing, changes) + extra = self._extra_changes(existing=existing, desired=desired, + changes=changes) if extra: self.log.info('plan: extra changes\n %s', '\n ' .join([unicode(c) for c in extra])) diff --git a/octodns/provider/dyn.py b/octodns/provider/dyn.py index cb0dabf..de430db 100644 --- a/octodns/provider/dyn.py +++ b/octodns/provider/dyn.py @@ -17,10 +17,107 @@ from logging import getLogger from threading import Lock from uuid import uuid4 -from ..record import Record +from ..record import Record, Update from .base import BaseProvider +############################################################################### +# +# The following monkey patching is to work around functionality that is lacking +# from DSFMonitor. You cannot set host or path (which we need) and there's no +# update method. 
What's more host & path aren't publicly accessible on the +# object so you can't see their current values and depending on how the object +# came to be (constructor vs pulled from the api) the "private" location of +# those fields varies :-( +# +############################################################################### +def _monitor_host_get(self): + return self._host or self._options['host'] + + +DSFMonitor.host = property(_monitor_host_get) + + +def _monitor_host_set(self, value): + if self._options is None: + self._options = {} + self._host = self._options['host'] = value + + +DSFMonitor.host = DSFMonitor.host.setter(_monitor_host_set) + + +def _monitor_path_get(self): + return self._path or self._options['path'] + + +DSFMonitor.path = property(_monitor_path_get) + + +def _monitor_path_set(self, value): + if self._options is None: + self._options = {} + self._path = self._options['path'] = value + + +DSFMonitor.path = DSFMonitor.path.setter(_monitor_path_set) + + +def _monitor_protocol_get(self): + return self._protocol + + +DSFMonitor.protocol = property(_monitor_protocol_get) + + +def _monitor_protocol_set(self, value): + self._protocol = value + + +DSFMonitor.protocol = DSFMonitor.protocol.setter(_monitor_protocol_set) + + +def _monitor_port_get(self): + return self._port or self._options['port'] + + +DSFMonitor.port = property(_monitor_port_get) + + +def _monitor_port_set(self, value): + if self._options is None: + self._options = {} + self._port = self._options['port'] = value + + +DSFMonitor.port = DSFMonitor.port.setter(_monitor_port_set) + + +def _monitor_update(self, host, path, protocol, port): + # I can't see how to actually do this with the client lib so + # I'm having to hack around it.
Have to provide all the + # options or else things complain + return self._update({ + 'protocol': protocol, + 'options': { + 'host': host, + 'path': path, + 'port': port, + 'timeout': DynProvider.MONITOR_TIMEOUT, + 'header': DynProvider.MONITOR_HEADER, + } + }) + + +DSFMonitor.update = _monitor_update +############################################################################### + + +def _monitor_doesnt_match(monitor, host, path, protocol, port): + return monitor.host != host or monitor.path != path or \ + monitor.protocol != protocol or int(monitor.port) != port + + class _CachingDynZone(DynZone): log = getLogger('_CachingDynZone') @@ -136,6 +233,9 @@ class DynProvider(BaseProvider): 'AN': 17, # Continental Antarctica } + MONITOR_HEADER = 'User-Agent: Dyn Monitor' + MONITOR_TIMEOUT = 10 + _sess_create_lock = Lock() def __init__(self, id, customer, username, password, @@ -389,6 +489,34 @@ class DynProvider(BaseProvider): len(zone.records) - before, exists) return exists + def _extra_changes(self, desired, changes, **kwargs): + self.log.debug('_extra_changes: desired=%s', desired.name) + + changed = set([c.record for c in changes]) + + extra = [] + for record in desired.records: + if record in changed or not getattr(record, 'geo', False): + # Already changed, or no geo, no need to check it + continue + label = '{}:{}'.format(record.fqdn, record._type) + try: + monitor = self.traffic_director_monitors[label] + except KeyError: + self.log.info('_extra_changes: health-check missing for %s', + label) + extra.append(Update(record, record)) + continue + if _monitor_doesnt_match(monitor, record.healthcheck_host, + record.healthcheck_path, + record.healthcheck_protocol, + record.healthcheck_port): + self.log.info('_extra_changes: health-check mis-match for %s', + label) + extra.append(Update(record, record)) + + return extra + def _kwargs_for_A(self, record): return [{ 'address': v, @@ -474,20 +602,55 @@ class DynProvider(BaseProvider): _kwargs_for_TXT = _kwargs_for_SPF - 
def _traffic_director_monitor(self, fqdn): + @property + def traffic_director_monitors(self): if self._traffic_director_monitors is None: + self.log.debug('traffic_director_monitors: loading') self._traffic_director_monitors = \ {m.label: m for m in get_all_dsf_monitors()} + return self._traffic_director_monitors + + def _traffic_director_monitor(self, record): + fqdn = record.fqdn + label = '{}:{}'.format(fqdn, record._type) try: - return self._traffic_director_monitors[fqdn] + try: + monitor = self.traffic_director_monitors[label] + self.log.debug('_traffic_director_monitor: existing for %s', + label) + except KeyError: + # UNTIL 1.0 We don't have one for the new label format, see if + # we still have one for the old and update it + monitor = self.traffic_director_monitors[fqdn] + self.log.info('_traffic_director_monitor: upgrading label ' + 'to %s', label) + monitor.label = label + self.traffic_director_monitors[label] = \ + self.traffic_director_monitors[fqdn] + del self.traffic_director_monitors[fqdn] + if _monitor_doesnt_match(monitor, record.healthcheck_host, + record.healthcheck_path, + record.healthcheck_protocol, + record.healthcheck_port): + self.log.info('_traffic_director_monitor: updating monitor ' + 'for %s', label) + monitor.update(record.healthcheck_host, + record.healthcheck_path, + record.healthcheck_protocol, + record.healthcheck_port) + return monitor except KeyError: - monitor = DSFMonitor(fqdn, protocol='HTTPS', response_count=2, - probe_interval=60, retries=2, port=443, - active='Y', host=fqdn[:-1], timeout=10, - header='User-Agent: Dyn Monitor', - path='/_dns') - self._traffic_director_monitors[fqdn] = monitor + self.log.info('_traffic_director_monitor: creating monitor ' + 'for %s', label) + monitor = DSFMonitor(label, protocol=record.healthcheck_protocol, + response_count=2, probe_interval=60, + retries=2, port=record.healthcheck_port, + active='Y', host=record.healthcheck_host, + timeout=self.MONITOR_TIMEOUT, + 
header=self.MONITOR_HEADER, + path=record.healthcheck_path) + self._traffic_director_monitors[label] = monitor return monitor def _find_or_create_pool(self, td, pools, label, _type, values, @@ -578,7 +741,7 @@ class DynProvider(BaseProvider): } ruleset.add_response_pool(pool.response_pool_id) - monitor_id = self._traffic_director_monitor(new.fqdn).dsf_monitor_id + monitor_id = self._traffic_director_monitor(new).dsf_monitor_id # Geos ordered least to most specific so that parents will always be # created before their children (and thus can be referenced geos = sorted(new.geo.items(), key=lambda d: d[0]) diff --git a/octodns/provider/powerdns.py b/octodns/provider/powerdns.py index f6f218e..02319e5 100644 --- a/octodns/provider/powerdns.py +++ b/octodns/provider/powerdns.py @@ -289,7 +289,7 @@ class PowerDnsBaseProvider(BaseProvider): def _get_nameserver_record(self, existing): return None - def _extra_changes(self, existing, _): + def _extra_changes(self, existing, **kwargs): self.log.debug('_extra_changes: zone=%s', existing.name) ns = self._get_nameserver_record(existing) diff --git a/octodns/provider/route53.py b/octodns/provider/route53.py index 1a2ca2e..c99873a 100644 --- a/octodns/provider/route53.py +++ b/octodns/provider/route53.py @@ -230,7 +230,7 @@ class Route53Provider(BaseProvider): # This should be bumped when there are underlying changes made to the # health check config. 
- HEALTH_CHECK_VERSION = '0000' + HEALTH_CHECK_VERSION = '0001' def __init__(self, id, access_key_id, secret_access_key, max_changes=1000, client_max_attempts=None, *args, **kwargs): @@ -526,6 +526,14 @@ class Route53Provider(BaseProvider): # We've got a cached version use it return self._health_checks + def _health_check_equivilent(self, host, path, protocol, port, + health_check, first_value=None): + config = health_check['HealthCheckConfig'] + return host == config['FullyQualifiedDomainName'] and \ + path == config['ResourcePath'] and protocol == config['Type'] \ + and port == config['Port'] and \ + (first_value is None or first_value == config['IPAddress']) + def get_health_check_id(self, record, ident, geo, create): # fqdn & the first value are special, we use them to match up health # checks to their records. Route53 health checks check a single ip and @@ -537,41 +545,47 @@ class Route53Provider(BaseProvider): 'first_value=%s', fqdn, record._type, ident, first_value) - # health check host can't end with a . 
- host = fqdn[:-1] + healthcheck_host = record.healthcheck_host + healthcheck_path = record.healthcheck_path + healthcheck_protocol = record.healthcheck_protocol + healthcheck_port = record.healthcheck_port + # we're looking for a healthcheck with the current version & our record # type, we'll ignore anything else - expected_version_and_type = '{}:{}:'.format(self.HEALTH_CHECK_VERSION, - record._type) + expected_ref = '{}:{}:{}:'.format(self.HEALTH_CHECK_VERSION, + record._type, record.fqdn) for id, health_check in self.health_checks.items(): - if not health_check['CallerReference'] \ - .startswith(expected_version_and_type): - # not a version & type match, ignore + if not health_check['CallerReference'].startswith(expected_ref): + # not match, ignore continue - config = health_check['HealthCheckConfig'] - if host == config['FullyQualifiedDomainName'] and \ - first_value == config['IPAddress']: + if self._health_check_equivilent(healthcheck_host, + healthcheck_path, + healthcheck_protocol, + healthcheck_port, health_check, + first_value=first_value): # this is the health check we're looking for + self.log.debug('get_health_check_id: found match id=%s', id) return id if not create: # no existing matches and not allowed to create, return none + self.log.debug('get_health_check_id: no matches, no create') return # no existing matches, we need to create a new health check config = { - 'EnableSNI': True, + 'EnableSNI': healthcheck_protocol == 'HTTPS', 'FailureThreshold': 6, - 'FullyQualifiedDomainName': host, + 'FullyQualifiedDomainName': healthcheck_host, 'IPAddress': first_value, 'MeasureLatency': True, - 'Port': 443, + 'Port': healthcheck_port, 'RequestInterval': 10, - 'ResourcePath': '/_dns', - 'Type': 'HTTPS', + 'ResourcePath': healthcheck_path, + 'Type': healthcheck_protocol, } - ref = '{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type, - uuid4().hex[:16]) + ref = '{}:{}:{}:{}'.format(self.HEALTH_CHECK_VERSION, record._type, + record.fqdn, uuid4().hex[:12]) 
resp = self._conn.create_health_check(CallerReference=ref, HealthCheckConfig=config) health_check = resp['HealthCheck'] @@ -579,11 +593,15 @@ class Route53Provider(BaseProvider): # store the new health check so that we'll be able to find it in the # future self._health_checks[id] = health_check - self.log.info('get_health_check_id: created id=%s, host=%s, ' - 'first_value=%s', id, host, first_value) + self.log.info('get_health_check_id: created id=%s, host=%s, path=%s, ' + 'protocol=%s, port=%d, first_value=%s', id, + healthcheck_host, healthcheck_path, healthcheck_protocol, + healthcheck_port, first_value) return id def _gc_health_checks(self, record, new): + if record._type not in ('A', 'AAAA'): + return self.log.debug('_gc_health_checks: record=%s', record) # Find the health checks we're using for the new route53 records in_use = set() @@ -595,17 +613,25 @@ # Now we need to run through ALL the health checks looking for those # that apply to this record, deleting any that do and are no longer in # use - host = record.fqdn[:-1] + expected_re = re.compile(r'^\d\d\d\d:{}:{}:' + .format(record._type, record.fqdn)) + # UNTIL 1.0: we'll clean out the previous version of Route53 health + checks as best as we can.
+ expected_legacy_host = record.fqdn[:-1] + expected_legacy = '0000:{}:'.format(record._type) for id, health_check in self.health_checks.items(): - config = health_check['HealthCheckConfig'] - _type = health_check['CallerReference'].split(':', 2)[1] - # if host and the pulled out type match it applies - if host == config['FullyQualifiedDomainName'] and \ - _type == record._type and id not in in_use: - # this is a health check for our fqdn & type but not one we're + ref = health_check['CallerReference'] + if expected_re.match(ref) and id not in in_use: + # this is a health check for this record, but not one we're # planning to use going forward self.log.info('_gc_health_checks: deleting id=%s', id) self._conn.delete_health_check(HealthCheckId=id) + elif ref.startswith(expected_legacy): + config = health_check['HealthCheckConfig'] + if expected_legacy_host == config['FullyQualifiedDomainName']: + self.log.info('_gc_health_checks: deleting legacy id=%s', + id) + self._conn.delete_health_check(HealthCheckId=id) def _gen_records(self, record, creating=False): ''' @@ -655,18 +681,18 @@ class Route53Provider(BaseProvider): self._gc_health_checks(change.existing, []) return self._gen_mods('DELETE', existing_records) - def _extra_changes(self, existing, changes): - self.log.debug('_extra_changes: existing=%s', existing.name) - zone_id = self._get_zone_id(existing.name) + def _extra_changes(self, desired, changes, **kwargs): + self.log.debug('_extra_changes: desired=%s', desired.name) + zone_id = self._get_zone_id(desired.name) if not zone_id: # zone doesn't exist so no extras to worry about return [] # we'll skip extra checking for anything we're already going to change changed = set([c.record for c in changes]) # ok, now it's time for the reason we're here, we need to go over all - # the existing records + # the desired records extra = [] - for record in existing.records: + for record in desired.records: if record in changed: # already have a change for it, skipping 
continue @@ -678,7 +704,13 @@ class Route53Provider(BaseProvider): # b/c of a health check version bump self.log.debug('_extra_changes: inspecting=%s, %s', record.fqdn, record._type) + + healthcheck_host = record.healthcheck_host + healthcheck_path = record.healthcheck_path + healthcheck_protocol = record.healthcheck_protocol + healthcheck_port = record.healthcheck_port fqdn = record.fqdn + # loop through all the r53 rrsets for rrset in self._load_records(zone_id): if fqdn != rrset['Name'] or record._type != rrset['Type']: @@ -691,17 +723,22 @@ class Route53Provider(BaseProvider): # we expect a healthcheck now try: health_check_id = rrset['HealthCheckId'] - caller_ref = \ - self.health_checks[health_check_id]['CallerReference'] + health_check = self.health_checks[health_check_id] + caller_ref = health_check['CallerReference'] if caller_ref.startswith(self.HEALTH_CHECK_VERSION): - # it has the right health check - continue - except KeyError: + if self._health_check_equivilent(healthcheck_host, + healthcheck_path, + healthcheck_protocol, + healthcheck_port, + health_check): + # it has the right health check + continue + except (IndexError, KeyError): # no health check id or one that isn't the right version pass # no good, doesn't have the right health check, needs an update - self.log.debug('_extra_changes: health-check caused ' - 'update') + self.log.info('_extra_changes: health-check caused ' + 'update of %s:%s', record.fqdn, record._type) extra.append(Update(record, record)) # We don't need to process this record any longer break diff --git a/octodns/record.py b/octodns/record.py index 0ae1335..201488c 100644 --- a/octodns/record.py +++ b/octodns/record.py @@ -115,6 +115,12 @@ class Record(object): reasons.append('invalid ttl') except KeyError: reasons.append('missing ttl') + try: + if data['octodns']['healthcheck']['protocol'] \ + not in ('HTTP', 'HTTPS'): + reasons.append('invalid healthcheck protocol') + except KeyError: + pass return reasons def __init__(self, 
zone, name, data, source=None): @@ -126,10 +132,7 @@ class Record(object): self.source = source self.ttl = int(data['ttl']) - octodns = data.get('octodns', {}) - self.ignored = octodns.get('ignored', False) - self.excluded = octodns.get('excluded', []) - self.included = octodns.get('included', []) + self._octodns = data.get('octodns', {}) def _data(self): return {'ttl': self.ttl} @@ -144,6 +147,46 @@ class Record(object): return '{}.{}'.format(self.name, self.zone.name) return self.zone.name + @property + def ignored(self): + return self._octodns.get('ignored', False) + + @property + def excluded(self): + return self._octodns.get('excluded', []) + + @property + def included(self): + return self._octodns.get('included', []) + + @property + def healthcheck_host(self): + try: + return self._octodns['healthcheck']['host'] + except KeyError: + return self.fqdn[:-1] + + @property + def healthcheck_path(self): + try: + return self._octodns['healthcheck']['path'] + except KeyError: + return '/_dns' + + @property + def healthcheck_protocol(self): + try: + return self._octodns['healthcheck']['protocol'] + except KeyError: + return 'HTTPS' + + @property + def healthcheck_port(self): + try: + return int(self._octodns['healthcheck']['port']) + except KeyError: + return 443 + def changes(self, other, target): # We're assuming we have the same name and type if we're being compared if self.ttl != other.ttl: diff --git a/tests/test_octodns_provider_base.py b/tests/test_octodns_provider_base.py index 64c0377..d5ac5b3 100644 --- a/tests/test_octodns_provider_base.py +++ b/tests/test_octodns_provider_base.py @@ -35,7 +35,7 @@ class HelperProvider(BaseProvider): return not self.include_change_callback or \ self.include_change_callback(change) - def _extra_changes(self, existing, changes): + def _extra_changes(self, **kwargs): return self.__extra_changes def _apply(self, plan): diff --git a/tests/test_octodns_provider_dyn.py b/tests/test_octodns_provider_dyn.py index edb604a..ac56477 
100644 --- a/tests/test_octodns_provider_dyn.py +++ b/tests/test_octodns_provider_dyn.py @@ -13,7 +13,7 @@ from unittest import TestCase from octodns.record import Create, Delete, Record, Update from octodns.provider.base import Plan -from octodns.provider.dyn import DynProvider, _CachingDynZone +from octodns.provider.dyn import DynProvider, _CachingDynZone, DSFMonitor from octodns.zone import Zone from helpers import SimpleProvider @@ -547,21 +547,40 @@ class TestDynProviderGeo(TestCase): monitors_response = { 'data': [{ 'active': 'Y', + 'agent_scheme': 'geo', 'dsf_monitor_id': monitor_id, 'endpoints': [], - 'label': 'unit.tests.', - 'notifier': '', - 'options': { - 'expected': '', - 'header': 'User-Agent: Dyn Monitor', - 'host': 'unit.tests', - 'path': '/_dns', - 'port': '443', - 'timeout': '10'}, + 'label': 'unit.tests.:A', + 'notifier': [], + 'expected': '', + 'header': 'User-Agent: Dyn Monitor', + 'host': 'unit.tests', + 'path': '/_dns', + 'port': '443', + 'timeout': '10', + 'probe_interval': '60', + 'protocol': 'HTTPS', + 'response_count': '2', + 'retries': '2', + 'services': ['12311'] + }, { + 'active': 'Y', + 'agent_scheme': 'geo', + 'dsf_monitor_id': 'b52', + 'endpoints': [], + 'label': 'old-label.unit.tests.', + 'notifier': [], + 'expected': '', + 'header': 'User-Agent: Dyn Monitor', + 'host': 'old-label.unit.tests', + 'path': '/_dns', + 'port': '443', + 'timeout': '10', 'probe_interval': '60', 'protocol': 'HTTPS', 'response_count': '2', - 'retries': '2' + 'retries': '2', + 'services': ['12312'] }], 'job_id': 3376281406, 'msgs': [{ @@ -662,6 +681,7 @@ class TestDynProviderGeo(TestCase): provider = DynProvider('test', 'cust', 'user', 'pass', True) # short-circuit session checking provider._dyn_sess = True + existing = Zone('unit.tests.', []) # no monitors, will try and create geo_monitor_id = '42x' @@ -670,16 +690,14 @@ class TestDynProviderGeo(TestCase): 'active': 'Y', 'dsf_monitor_id': geo_monitor_id, 'endpoints': [], - 'label': 'geo.unit.tests.', + 
'label': 'geo.unit.tests.:A', 'notifier': '', - 'options': { - 'expected': '', - 'header': 'User-Agent: Dyn Monitor', - 'host': 'geo.unit.tests.', - 'path': '/_dns', - 'port': '443', - 'timeout': '10' - }, + 'expected': '', + 'header': 'User-Agent: Dyn Monitor', + 'host': 'geo.unit.tests.', + 'path': '/_dns', + 'port': '443', + 'timeout': '10', 'probe_interval': '60', 'protocol': 'HTTPS', 'response_count': '2', @@ -694,7 +712,18 @@ class TestDynProviderGeo(TestCase): }] # ask for a monitor that doesn't exist - monitor = provider._traffic_director_monitor('geo.unit.tests.') + record = Record.new(existing, 'geo', { + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4', + 'octodns': { + 'healthcheck': { + 'host': 'foo.bar', + 'path': '/_ready' + } + } + }) + monitor = provider._traffic_director_monitor(record) self.assertEquals(geo_monitor_id, monitor.dsf_monitor_id) # should see a request for the list and a create mock.assert_has_calls([ @@ -703,12 +732,12 @@ class TestDynProviderGeo(TestCase): 'retries': 2, 'protocol': 'HTTPS', 'response_count': 2, - 'label': 'geo.unit.tests.', + 'label': 'geo.unit.tests.:A', 'probe_interval': 60, 'active': 'Y', 'options': { - 'path': '/_dns', - 'host': 'geo.unit.tests', + 'path': '/_ready', + 'host': 'foo.bar', 'header': 'User-Agent: Dyn Monitor', 'port': 443, 'timeout': 10 @@ -716,19 +745,214 @@ class TestDynProviderGeo(TestCase): }) ]) # created monitor is now cached - self.assertTrue('geo.unit.tests.' in + self.assertTrue('geo.unit.tests.:A' in provider._traffic_director_monitors) # pre-existing one is there too - self.assertTrue('unit.tests.' 
in + self.assertTrue('unit.tests.:A' in provider._traffic_director_monitors) # now ask for a monitor that does exist + record = Record.new(existing, '', { + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4' + }) mock.reset_mock() - monitor = provider._traffic_director_monitor('unit.tests.') + monitor = provider._traffic_director_monitor(record) self.assertEquals(self.monitor_id, monitor.dsf_monitor_id) # should have resulted in no calls b/c exists & we've cached the list mock.assert_not_called() + # and finally for a monitor that exists, but with a differing config + record = Record.new(existing, '', { + 'octodns': { + 'healthcheck': { + 'host': 'bleep.bloop', + 'path': '/_nope', + 'protocol': 'HTTP', + 'port': 8080, + } + }, + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4' + }) + mock.reset_mock() + mock.side_effect = [{ + 'data': { + 'active': 'Y', + 'dsf_monitor_id': self.monitor_id, + 'endpoints': [], + 'label': 'unit.tests.:A', + 'notifier': '', + 'expected': '', + 'header': 'User-Agent: Dyn Monitor', + 'host': 'bleep.bloop', + 'path': '/_nope', + 'port': '8080', + 'timeout': '10', + 'probe_interval': '60', + 'protocol': 'HTTP', + 'response_count': '2', + 'retries': '2' + }, + 'job_id': 3376259461, + 'msgs': [{'ERR_CD': None, + 'INFO': 'add: Here is the new monitor', + 'LVL': 'INFO', + 'SOURCE': 'BLL'}], + 'status': 'success' + }] + monitor = provider._traffic_director_monitor(record) + self.assertEquals(self.monitor_id, monitor.dsf_monitor_id) + # should have resulted an update + mock.assert_has_calls([ + call('/DSFMonitor/42a/', 'PUT', { + 'protocol': 'HTTP', + 'options': { + 'path': '/_nope', + 'host': 'bleep.bloop', + 'header': 'User-Agent: Dyn Monitor', + 'port': 8080, + 'timeout': 10 + } + }) + ]) + # cached monitor should have been updated + self.assertTrue('unit.tests.:A' in + provider._traffic_director_monitors) + monitor = provider._traffic_director_monitors['unit.tests.:A'] + self.assertEquals('bleep.bloop', monitor.host) + self.assertEquals('/_nope', 
monitor.path) + self.assertEquals('HTTP', monitor.protocol) + self.assertEquals('8080', monitor.port) + + # test upgrading an old label + record = Record.new(existing, 'old-label', { + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4' + }) + mock.reset_mock() + mock.side_effect = [{ + 'data': { + 'active': 'Y', + 'dsf_monitor_id': self.monitor_id, + 'endpoints': [], + 'label': 'old-label.unit.tests.:A', + 'notifier': '', + 'expected': '', + 'header': 'User-Agent: Dyn Monitor', + 'host': 'old-label.unit.tests', + 'path': '/_dns', + 'port': '443', + 'timeout': '10', + 'probe_interval': '60', + 'protocol': 'HTTPS', + 'response_count': '2', + 'retries': '2' + }, + 'job_id': 3376259461, + 'msgs': [{'ERR_CD': None, + 'INFO': 'add: Here is the new monitor', + 'LVL': 'INFO', + 'SOURCE': 'BLL'}], + 'status': 'success' + }] + monitor = provider._traffic_director_monitor(record) + self.assertEquals(self.monitor_id, monitor.dsf_monitor_id) + # should have resulted an update + mock.assert_has_calls([ + call('/DSFMonitor/b52/', 'PUT', { + 'label': 'old-label.unit.tests.:A' + }) + ]) + # cached monitor should have been updated + self.assertTrue('old-label.unit.tests.:A' in + provider._traffic_director_monitors) + + @patch('dyn.core.SessionEngine.execute') + def test_extra_changes(self, mock): + provider = DynProvider('test', 'cust', 'user', 'pass', True) + # short-circuit session checking + provider._dyn_sess = True + + mock.side_effect = [self.monitors_response] + + # non-geo + desired = Zone('unit.tests.', []) + record = Record.new(desired, '', { + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4', + }) + desired.add_record(record) + extra = provider._extra_changes(desired=desired, + changes=[Create(record)]) + self.assertEquals(0, len(extra)) + + # in changes, noop + desired = Zone('unit.tests.', []) + record = Record.new(desired, '', { + 'geo': { + 'NA': ['1.2.3.4'], + }, + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4', + }) + desired.add_record(record) + extra = 
provider._extra_changes(desired=desired, + changes=[Create(record)]) + self.assertEquals(0, len(extra)) + + # no diff, no extra + extra = provider._extra_changes(desired=desired, changes=[]) + self.assertEquals(0, len(extra)) + + # monitors should have been fetched now + mock.assert_called_once() + + # diff in healthcheck, gets extra + desired = Zone('unit.tests.', []) + record = Record.new(desired, '', { + 'geo': { + 'NA': ['1.2.3.4'], + }, + 'octodns': { + 'healthcheck': { + 'host': 'foo.bar', + 'path': '/_ready' + } + }, + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4', + }) + desired.add_record(record) + extra = provider._extra_changes(desired=desired, changes=[]) + self.assertEquals(1, len(extra)) + extra = extra[0] + self.assertIsInstance(extra, Update) + self.assertEquals(record, extra.record) + + # missing health check + desired = Zone('unit.tests.', []) + record = Record.new(desired, 'geo', { + 'geo': { + 'NA': ['1.2.3.4'], + }, + 'ttl': 60, + 'type': 'A', + 'value': '1.2.3.4', + }) + desired.add_record(record) + extra = provider._extra_changes(desired=desired, changes=[]) + self.assertEquals(1, len(extra)) + extra = extra[0] + self.assertIsInstance(extra, Update) + self.assertEquals(record, extra.record) + @patch('dyn.core.SessionEngine.execute') def test_populate_traffic_directors_empty(self, mock): provider = DynProvider('test', 'cust', 'user', 'pass', @@ -1285,3 +1509,71 @@ class TestDynProviderAlias(TestCase): execute_mock.assert_has_calls([call('/Zone/unit.tests/', 'GET', {}), call('/Zone/unit.tests/', 'GET', {})]) self.assertEquals(2, len(plan.changes)) + + +# Need a class that doesn't do all the "real" stuff, but gets our monkey +# patching +class DummyDSFMonitor(DSFMonitor): + + def __init__(self, host=None, path=None, protocol=None, port=None, + options_host=None, options_path=None, options_protocol=None, + options_port=None): + # not calling super on purpose + self._host = host + self._path = path + self._protocol = protocol + self._port = port 
+ if options_host: + self._options = { + 'host': options_host, + 'path': options_path, + 'protocol': options_protocol, + 'port': options_port, + } + else: + self._options = None + + +class TestDSFMonitorMonkeyPatching(TestCase): + + def test_host(self): + monitor = DummyDSFMonitor(host='host.com', path='/path', + protocol='HTTP', port=8080) + self.assertEquals('host.com', monitor.host) + self.assertEquals('/path', monitor.path) + self.assertEquals('HTTP', monitor.protocol) + self.assertEquals(8080, monitor.port) + + monitor = DummyDSFMonitor(options_host='host.com', + options_path='/path', + options_protocol='HTTP', options_port=8080) + self.assertEquals('host.com', monitor.host) + self.assertEquals('/path', monitor.path) + + monitor.host = 'other.com' + self.assertEquals('other.com', monitor.host) + monitor.path = '/other-path' + self.assertEquals('/other-path', monitor.path) + monitor.protocol = 'HTTPS' + self.assertEquals('HTTPS', monitor.protocol) + monitor.port = 8081 + self.assertEquals(8081, monitor.port) + + monitor = DummyDSFMonitor() + monitor.host = 'other.com' + self.assertEquals('other.com', monitor.host) + monitor = DummyDSFMonitor() + monitor.path = '/other-path' + self.assertEquals('/other-path', monitor.path) + monitor.protocol = 'HTTP' + self.assertEquals('HTTP', monitor.protocol) + monitor.port = 8080 + self.assertEquals(8080, monitor.port) + + # Just to exercise the _options init + monitor = DummyDSFMonitor() + monitor.protocol = 'HTTP' + self.assertEquals('HTTP', monitor.protocol) + monitor = DummyDSFMonitor() + monitor.port = 8080 + self.assertEquals(8080, monitor.port) diff --git a/tests/test_octodns_provider_powerdns.py b/tests/test_octodns_provider_powerdns.py index 2722991..067dc74 100644 --- a/tests/test_octodns_provider_powerdns.py +++ b/tests/test_octodns_provider_powerdns.py @@ -52,7 +52,6 @@ class TestPowerDnsProvider(TestCase): with self.assertRaises(Exception) as ctx: zone = Zone('unit.tests.', []) provider.populate(zone) - 
print(ctx.exception.message) self.assertTrue('unauthorized' in ctx.exception.message) # General error diff --git a/tests/test_octodns_provider_route53.py b/tests/test_octodns_provider_route53.py index f4fc99f..aec31cb 100644 --- a/tests/test_octodns_provider_route53.py +++ b/tests/test_octodns_provider_route53.py @@ -18,6 +18,12 @@ from octodns.zone import Zone from helpers import GeoProvider +class DummyR53Record(object): + + def __init__(self, health_check_id): + self.health_check_id = health_check_id + + class TestOctalReplace(TestCase): def test_basic(self): @@ -87,7 +93,8 @@ class TestRoute53Provider(TestCase): record = Record.new(expected, name, data) expected.add_record(record) - caller_ref = '{}:A:1324'.format(Route53Provider.HEALTH_CHECK_VERSION) + caller_ref = '{}:A:unit.tests.:1324' \ + .format(Route53Provider.HEALTH_CHECK_VERSION) health_checks = [{ 'Id': '42', 'CallerReference': caller_ref, @@ -95,6 +102,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }, { @@ -104,6 +114,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '5.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 42, }, { @@ -113,6 +126,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '5.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }, { @@ -122,6 +138,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '7.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }, { @@ -132,6 +151,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '7.2.3.4', + 
'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }] @@ -662,6 +684,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }, { @@ -671,6 +696,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '9.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }] @@ -690,6 +718,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '8.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }] @@ -734,6 +765,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }, { @@ -743,6 +777,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }] @@ -754,15 +791,15 @@ class TestRoute53Provider(TestCase): }) health_check_config = { - 'EnableSNI': True, + 'EnableSNI': False, 'FailureThreshold': 6, - 'FullyQualifiedDomainName': 'unit.tests', + 'FullyQualifiedDomainName': 'foo.bar.com', 'IPAddress': '4.2.3.4', 'MeasureLatency': True, - 'Port': 443, + 'Port': 8080, 'RequestInterval': 10, - 'ResourcePath': '/_dns', - 'Type': 'HTTPS' + 'ResourcePath': '/_status', + 'Type': 'HTTP' } stubber.add_response('create_health_check', { 'HealthCheck': { @@ -783,6 +820,14 @@ class TestRoute53Provider(TestCase): 'values': ['2.2.3.4', '3.2.3.4'], 'geo': { 'AF': ['4.2.3.4'], + }, + 'octodns': { + 'healthcheck': { + 'host': 'foo.bar.com', + 'path': '/_status', + 'port': 8080, + 'protocol': 'HTTP', + 
}, } }) @@ -817,18 +862,13 @@ class TestRoute53Provider(TestCase): } }) - class DummyRecord(object): - - def __init__(self, health_check_id): - self.health_check_id = health_check_id - # gc no longer in_use records (directly) stubber.add_response('delete_health_check', {}, { 'HealthCheckId': '44', }) provider._gc_health_checks(record, [ - DummyRecord('42'), - DummyRecord('43'), + DummyR53Record('42'), + DummyR53Record('43'), ]) stubber.assert_no_pending_responses() @@ -876,6 +916,77 @@ class TestRoute53Provider(TestCase): provider._gc_health_checks(record, []) stubber.assert_no_pending_responses() + def test_legacy_health_check_gc(self): + provider, stubber = self._get_stubbed_provider() + + old_caller_ref = '0000:A:3333' + health_checks = [{ + 'Id': '42', + 'CallerReference': self.caller_ref, + 'HealthCheckConfig': { + 'Type': 'HTTPS', + 'FullyQualifiedDomainName': 'unit.tests', + 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, + }, + 'HealthCheckVersion': 2, + }, { + 'Id': '43', + 'CallerReference': old_caller_ref, + 'HealthCheckConfig': { + 'Type': 'HTTPS', + 'FullyQualifiedDomainName': 'unit.tests', + 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, + }, + 'HealthCheckVersion': 2, + }, { + 'Id': '44', + 'CallerReference': old_caller_ref, + 'HealthCheckConfig': { + 'Type': 'HTTPS', + 'FullyQualifiedDomainName': 'other.unit.tests', + 'IPAddress': '4.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, + }, + 'HealthCheckVersion': 2, + }] + + stubber.add_response('list_health_checks', { + 'HealthChecks': health_checks, + 'IsTruncated': False, + 'MaxItems': '100', + 'Marker': '', + }) + + # No changes to the record itself + record = Record.new(self.expected, '', { + 'ttl': 61, + 'type': 'A', + 'values': ['2.2.3.4', '3.2.3.4'], + 'geo': { + 'AF': ['4.2.3.4'], + 'NA-US': ['5.2.3.4', '6.2.3.4'], + 'NA-US-CA': ['7.2.3.4'] + } + }) + + # Expect to delete the legacy hc for our 
record, but not touch the new + # one or the other legacy record + stubber.add_response('delete_health_check', {}, { + 'HealthCheckId': '43', + }) + + provider._gc_health_checks(record, [ + DummyR53Record('42'), + ]) + def test_no_extra_changes(self): provider, stubber = self._get_stubbed_provider() @@ -892,26 +1003,26 @@ class TestRoute53Provider(TestCase): stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) # empty is empty - existing = Zone('unit.tests.', []) - extra = provider._extra_changes(existing, []) + desired = Zone('unit.tests.', []) + extra = provider._extra_changes(desired=desired, changes=[]) self.assertEquals([], extra) stubber.assert_no_pending_responses() # single record w/o geo is empty - existing = Zone('unit.tests.', []) - record = Record.new(existing, 'a', { + desired = Zone('unit.tests.', []) + record = Record.new(desired, 'a', { 'ttl': 30, 'type': 'A', 'value': '1.2.3.4', }) - existing.add_record(record) - extra = provider._extra_changes(existing, []) + desired.add_record(record) + extra = provider._extra_changes(desired=desired, changes=[]) self.assertEquals([], extra) stubber.assert_no_pending_responses() # short-circuit for unknown zone other = Zone('other.tests.', []) - extra = provider._extra_changes(other, []) + extra = provider._extra_changes(desired=other, changes=[]) self.assertEquals([], extra) stubber.assert_no_pending_responses() @@ -931,8 +1042,8 @@ class TestRoute53Provider(TestCase): stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) # record with geo and no health check returns change - existing = Zone('unit.tests.', []) - record = Record.new(existing, 'a', { + desired = Zone('unit.tests.', []) + record = Record.new(desired, 'a', { 'ttl': 30, 'type': 'A', 'value': '1.2.3.4', @@ -940,7 +1051,7 @@ class TestRoute53Provider(TestCase): 'NA': ['2.2.3.4'], } }) - existing.add_record(record) + desired.add_record(record) list_resource_record_sets_resp = { 'ResourceRecordSets': [{ 'Name': 
'a.unit.tests.', @@ -959,7 +1070,7 @@ class TestRoute53Provider(TestCase): stubber.add_response('list_resource_record_sets', list_resource_record_sets_resp, {'HostedZoneId': 'z42'}) - extra = provider._extra_changes(existing, []) + extra = provider._extra_changes(desired=desired, changes=[]) self.assertEquals(1, len(extra)) stubber.assert_no_pending_responses() @@ -979,8 +1090,8 @@ class TestRoute53Provider(TestCase): stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) # record with geo and no health check returns change - existing = Zone('unit.tests.', []) - record = Record.new(existing, 'a', { + desired = Zone('unit.tests.', []) + record = Record.new(desired, 'a', { 'ttl': 30, 'type': 'A', 'value': '1.2.3.4', @@ -988,7 +1099,7 @@ class TestRoute53Provider(TestCase): 'NA': ['2.2.3.4'], } }) - existing.add_record(record) + desired.add_record(record) list_resource_record_sets_resp = { 'ResourceRecordSets': [{ 'Name': 'a.unit.tests.', @@ -1016,6 +1127,9 @@ class TestRoute53Provider(TestCase): 'Type': 'HTTPS', 'FullyQualifiedDomainName': 'unit.tests', 'IPAddress': '2.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }], @@ -1023,12 +1137,12 @@ class TestRoute53Provider(TestCase): 'MaxItems': '100', 'Marker': '', }) - extra = provider._extra_changes(existing, []) + extra = provider._extra_changes(desired=desired, changes=[]) self.assertEquals(1, len(extra)) stubber.assert_no_pending_responses() for change in (Create(record), Update(record, record), Delete(record)): - extra = provider._extra_changes(existing, [change]) + extra = provider._extra_changes(desired=desired, changes=[change]) self.assertEquals(0, len(extra)) stubber.assert_no_pending_responses() @@ -1048,8 +1162,8 @@ class TestRoute53Provider(TestCase): stubber.add_response('list_hosted_zones', list_hosted_zones_resp, {}) # record with geo and no health check returns change - existing = Zone('unit.tests.', []) - record = Record.new(existing, 
'a', { + desired = Zone('unit.tests.', []) + record = Record.new(desired, 'a', { 'ttl': 30, 'type': 'A', 'value': '1.2.3.4', @@ -1057,7 +1171,7 @@ class TestRoute53Provider(TestCase): 'NA': ['2.2.3.4'], } }) - existing.add_record(record) + desired.add_record(record) list_resource_record_sets_resp = { 'ResourceRecordSets': [{ # other name @@ -1114,8 +1228,11 @@ class TestRoute53Provider(TestCase): 'CallerReference': self.caller_ref, 'HealthCheckConfig': { 'Type': 'HTTPS', - 'FullyQualifiedDomainName': 'unit.tests', + 'FullyQualifiedDomainName': 'a.unit.tests', 'IPAddress': '2.2.3.4', + 'ResourcePath': '/_dns', + 'Type': 'HTTPS', + 'Port': 443, }, 'HealthCheckVersion': 2, }], @@ -1123,10 +1240,26 @@ class TestRoute53Provider(TestCase): 'MaxItems': '100', 'Marker': '', }) - extra = provider._extra_changes(existing, []) + extra = provider._extra_changes(desired=desired, changes=[]) self.assertEquals(0, len(extra)) stubber.assert_no_pending_responses() + # change b/c of healthcheck path + record._octodns['healthcheck'] = { + 'path': '/_ready' + } + extra = provider._extra_changes(desired=desired, changes=[]) + self.assertEquals(1, len(extra)) + stubber.assert_no_pending_responses() + + # change b/c of healthcheck host + record._octodns['healthcheck'] = { + 'host': 'foo.bar.io' + } + extra = provider._extra_changes(desired=desired, changes=[]) + self.assertEquals(1, len(extra)) + stubber.assert_no_pending_responses() + def _get_test_plan(self, max_changes): provider = Route53Provider('test', 'abc', '123', max_changes) diff --git a/tests/test_octodns_record.py b/tests/test_octodns_record.py index 56502a0..16da404 100644 --- a/tests/test_octodns_record.py +++ b/tests/test_octodns_record.py @@ -746,6 +746,61 @@ class TestRecord(TestCase): self.assertEquals(values, geo.values) self.assertEquals(['NA-US', 'NA'], list(geo.parents)) + def test_healthcheck(self): + new = Record.new(self.zone, 'a', { + 'ttl': 44, + 'type': 'A', + 'value': '1.2.3.4', + 'octodns': { + 
'healthcheck': { + 'path': '/_ready', + 'host': 'bleep.bloop', + 'protocol': 'HTTP', + 'port': 8080, + } + } + }) + self.assertEquals('/_ready', new.healthcheck_path) + self.assertEquals('bleep.bloop', new.healthcheck_host) + self.assertEquals('HTTP', new.healthcheck_protocol) + self.assertEquals(8080, new.healthcheck_port) + + new = Record.new(self.zone, 'a', { + 'ttl': 44, + 'type': 'A', + 'value': '1.2.3.4', + }) + self.assertEquals('/_dns', new.healthcheck_path) + self.assertEquals('a.unit.tests', new.healthcheck_host) + self.assertEquals('HTTPS', new.healthcheck_protocol) + self.assertEquals(443, new.healthcheck_port) + + def test_inored(self): + new = Record.new(self.zone, 'txt', { + 'ttl': 44, + 'type': 'TXT', + 'value': 'some change', + 'octodns': { + 'ignored': True, + } + }) + self.assertTrue(new.ignored) + new = Record.new(self.zone, 'txt', { + 'ttl': 44, + 'type': 'TXT', + 'value': 'some change', + 'octodns': { + 'ignored': False, + } + }) + self.assertFalse(new.ignored) + new = Record.new(self.zone, 'txt', { + 'ttl': 44, + 'type': 'TXT', + 'value': 'some change', + }) + self.assertFalse(new.ignored) + class TestRecordValidation(TestCase): zone = Zone('unit.tests.', []) @@ -965,6 +1020,25 @@ class TestRecordValidation(TestCase): 'invalid ip address "goodbye"' ], ctx.exception.reasons) + # invalid healthcheck protocol + with self.assertRaises(ValidationError) as ctx: + Record.new(self.zone, 'a', { + 'geo': { + 'NA': ['1.2.3.5'], + 'NA-US': ['1.2.3.5', '1.2.3.6'] + }, + 'type': 'A', + 'ttl': 600, + 'value': '1.2.3.4', + 'octodns': { + 'healthcheck': { + 'protocol': 'FTP', + } + } + }) + self.assertEquals(['invalid healthcheck protocol'], + ctx.exception.reasons) + def test_AAAA(self): # doesn't blow up Record.new(self.zone, '', {