diff --git a/CHANGELOG.md b/CHANGELOG.md index 1d16544..f70d67c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## v0.9.13 - 2021-..-.. - + +#### Noteworthy changes + +* Alpha support for Processors has been added. Processors allow for hooking + into the source, target, and planing process to make nearly arbitrary changes + to data. See the [octodns/processor/](/octodns/processor) directory for + examples. The change has been designed to have no impact on the process + unless the `processors` key is present in zone configs. + ## v0.9.12 - 2021-04-30 - Enough time has passed #### Noteworthy changes diff --git a/octodns/manager.py b/octodns/manager.py index 9b10196..dcbf083 100644 --- a/octodns/manager.py +++ b/octodns/manager.py @@ -122,6 +122,25 @@ class Manager(object): raise ManagerException('Incorrect provider config for {}' .format(provider_name)) + self.processors = {} + for processor_name, processor_config in \ + self.config.get('processors', {}).items(): + try: + _class = processor_config.pop('class') + except KeyError: + self.log.exception('Invalid processor class') + raise ManagerException('Processor {} is missing class' + .format(processor_name)) + _class = self._get_named_class('processor', _class) + kwargs = self._build_kwargs(processor_config) + try: + self.processors[processor_name] = _class(processor_name, + **kwargs) + except TypeError: + self.log.exception('Invalid processor config') + raise ManagerException('Incorrect processor config for {}' + .format(processor_name)) + zone_tree = {} # sort by reversed strings so that parent zones always come first for name in sorted(self.config['zones'].keys(), key=lambda s: s[::-1]): @@ -223,8 +242,8 @@ class Manager(object): self.log.debug('configured_sub_zones: subs=%s', sub_zone_names) return set(sub_zone_names) - def _populate_and_plan(self, zone_name, sources, targets, desired=None, - lenient=False): + def _populate_and_plan(self, zone_name, processors, sources, targets, + desired=None, lenient=False): self.log.debug('sync: populating, zone=%s, lenient=%s', zone_name, lenient) @@ -237,7 +256,6 @@ class Manager(object): for _, records in desired._records.items(): for record in records: zone.add_record(record.copy(zone=zone), lenient=lenient) - else: for source in sources: try: @@ -245,10 +263,13 @@ class Manager(object): except TypeError as e: if "keyword argument 'lenient'" not in text_type(e): raise - self.log.warn(': provider %s does not accept lenient ' + self.log.warn('provider %s does not accept lenient ' 'param', source.__class__.__name__) source.populate(zone) + for processor in processors: + zone = processor.process_source_zone(zone, sources=sources) + self.log.debug('sync: planning, zone=%s', zone_name) plans = [] @@ -260,7 +281,18 @@ class Manager(object): 'value': 'provider={}'.format(target.id) }) zone.add_record(meta, replace=True) - plan = target.plan(zone) + try: + plan = target.plan(zone, processors=processors) + except TypeError as e: + if "keyword argument 'processors'" not in text_type(e): + raise + self.log.warn('provider.plan %s does not accept processors ' + 'param', target.__class__.__name__) + plan = target.plan(zone) + + for processor in processors: + plan = processor.process_plan(plan, sources=sources, + target=target) if plan: plans.append((target, plan)) @@ -319,6 +351,8 @@ class Manager(object): raise ManagerException('Zone {} is missing targets' .format(zone_name)) + processors = config.get('processors', []) + if (eligible_sources and not [s for s in sources if s in eligible_sources]): 
self.log.info('sync: no eligible sources, skipping') @@ -336,6 +370,15 @@ class Manager(object): self.log.info('sync: sources=%s -> targets=%s', sources, targets) + try: + collected = [] + for processor in processors: + collected.append(self.processors[processor]) + processors = collected + except KeyError: + raise ManagerException('Zone {}, unknown processor: {}' + .format(zone_name, processor)) + try: # rather than using a list comprehension, we break this loop # out so that the `except` block below can reference the @@ -362,8 +405,9 @@ class Manager(object): .format(zone_name, target)) futures.append(self._executor.submit(self._populate_and_plan, - zone_name, sources, - targets, lenient=lenient)) + zone_name, processors, + sources, targets, + lenient=lenient)) # Wait on all results and unpack/flatten the plans and store the # desired states in case we need them below @@ -388,6 +432,7 @@ class Manager(object): futures.append(self._executor.submit( self._populate_and_plan, zone_name, + processors, [], [self.providers[t] for t in source_config['targets']], desired=desired_config, @@ -528,6 +573,16 @@ class Manager(object): if isinstance(source, YamlProvider): source.populate(zone) + # check that processors are in order if any are specified + processors = config.get('processors', []) + try: + # same as above, but for processors this time + for processor in processors: + collected.append(self.processors[processor]) + except KeyError: + raise ManagerException('Zone {}, unknown processor: {}' + .format(zone_name, processor)) + def get_zone(self, zone_name): if not zone_name[-1] == '.': raise ManagerException('Invalid zone name {}, missing ending dot' diff --git a/octodns/processor/__init__.py b/octodns/processor/__init__.py new file mode 100644 index 0000000..14ccf18 --- /dev/null +++ b/octodns/processor/__init__.py @@ -0,0 +1,6 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals diff --git a/octodns/processor/base.py b/octodns/processor/base.py new file mode 100644 index 0000000..327b1c2 --- /dev/null +++ b/octodns/processor/base.py @@ -0,0 +1,30 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +from ..zone import Zone + + +class BaseProcessor(object): + + def __init__(self, name): + self.name = name + + def _clone_zone(self, zone): + return Zone(zone.name, sub_zones=zone.sub_zones) + + def process_source_zone(self, zone, sources): + # sources may be empty, as will be the case for aliased zones + return zone + + def process_target_zone(self, zone, target): + return zone + + def process_plan(self, plan, sources, target): + # plan may be None if no changes were detected up until now, the + # process may still create a plan. 
+ # sources may be empty, as will be the case for aliased zones + return plan diff --git a/octodns/processor/filter.py b/octodns/processor/filter.py new file mode 100644 index 0000000..369e987 --- /dev/null +++ b/octodns/processor/filter.py @@ -0,0 +1,44 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +from .base import BaseProcessor + + +class TypeAllowlistFilter(BaseProcessor): + + def __init__(self, name, allowlist): + super(TypeAllowlistFilter, self).__init__(name) + self.allowlist = set(allowlist) + + def _process(self, zone, *args, **kwargs): + ret = self._clone_zone(zone) + for record in zone.records: + if record._type in self.allowlist: + ret.add_record(record) + + return ret + + process_source_zone = _process + process_target_zone = _process + + +class TypeRejectlistFilter(BaseProcessor): + + def __init__(self, name, rejectlist): + super(TypeRejectlistFilter, self).__init__(name) + self.rejectlist = set(rejectlist) + + def _process(self, zone, *args, **kwargs): + ret = self._clone_zone(zone) + for record in zone.records: + if record._type not in self.rejectlist: + ret.add_record(record) + + return ret + + process_source_zone = _process + process_target_zone = _process diff --git a/octodns/processor/ownership.py b/octodns/processor/ownership.py new file mode 100644 index 0000000..172f349 --- /dev/null +++ b/octodns/processor/ownership.py @@ -0,0 +1,103 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +from collections import defaultdict + +from ..provider.plan import Plan +from ..record import Record + +from .base import BaseProcessor + + +# Mark anything octoDNS is managing that way it can know it's safe to modify or +# delete. We'll take ownership of existing records that we're told to manage +# and thus "own" them going forward. +class OwnershipProcessor(BaseProcessor): + + def __init__(self, name, txt_name='_owner', txt_value='*octodns*'): + super(OwnershipProcessor, self).__init__(name) + self.txt_name = txt_name + self.txt_value = txt_value + self._txt_values = [txt_value] + + def process_source_zone(self, zone, *args, **kwargs): + ret = self._clone_zone(zone) + for record in zone.records: + # Always copy over the source records + ret.add_record(record) + # Then create and add an ownership TXT for each of them + record_name = record.name.replace('*', '_wildcard') + if record.name: + name = '{}.{}.{}'.format(self.txt_name, record._type, + record_name) + else: + name = '{}.{}'.format(self.txt_name, record._type) + txt = Record.new(zone, name, { + 'type': 'TXT', + 'ttl': 60, + 'value': self.txt_value, + }) + ret.add_record(txt) + + return ret + + def _is_ownership(self, record): + return record._type == 'TXT' and \ + record.name.startswith(self.txt_name) \ + and record.values == self._txt_values + + def process_plan(self, plan, *args, **kwargs): + if not plan: + # If we don't have any change there's nothing to do + return plan + + # First find all the ownership info + owned = defaultdict(dict) + # We need to look for ownership in both the desired and existing + # states, many things will show up in both, but that's fine. 
+ for record in list(plan.existing.records) + list(plan.desired.records): + if self._is_ownership(record): + pieces = record.name.split('.', 2) + if len(pieces) > 2: + _, _type, name = pieces + name = name.replace('_wildcard', '*') + else: + _type = pieces[1] + name = '' + owned[name][_type.upper()] = True + + # Cases: + # - Configured in source + # - We'll fully CRU/manage it adding ownership TXT, + # thanks to process_source_zone, if needed + # - Not in source + # - Has an ownership TXT - delete it & the ownership TXT + # - Does not have an ownership TXT - don't delete it + # - Special records like octodns-meta + # - Should be left alone and should not have ownerthis TXTs + + filtered_changes = [] + for change in plan.changes: + record = change.record + + if not self._is_ownership(record) and \ + record._type not in owned[record.name] and \ + record.name != 'octodns-meta': + # It's not an ownership TXT, it's not owned, and it's not + # special we're going to ignore it + continue + + # We own this record or owned it up until now so whatever the + # change is we should do + filtered_changes.append(change) + + if plan.changes != filtered_changes: + return Plan(plan.existing, plan.desired, filtered_changes, + plan.exists, plan.update_pcent_threshold, + plan.delete_pcent_threshold) + + return plan diff --git a/octodns/provider/base.py b/octodns/provider/base.py index eb097a2..729c9ee 100644 --- a/octodns/provider/base.py +++ b/octodns/provider/base.py @@ -44,7 +44,7 @@ class BaseProvider(BaseSource): ''' return [] - def plan(self, desired): + def plan(self, desired, processors=[]): self.log.info('plan: desired=%s', desired.name) existing = Zone(desired.name, desired.sub_zones) @@ -55,6 +55,9 @@ class BaseProvider(BaseSource): self.log.warn('Provider %s used in target mode did not return ' 'exists', self.id) + for processor in processors: + existing = processor.process_target_zone(existing, target=self) + # compute the changes at the zone/record level changes = existing.changes(desired, self) diff --git a/tests/config/processors-missing-class.yaml b/tests/config/processors-missing-class.yaml new file mode 100644 index 0000000..4594307 --- /dev/null +++ b/tests/config/processors-missing-class.yaml @@ -0,0 +1,23 @@ +providers: + config: + class: octodns.provider.yaml.YamlProvider + directory: tests/config + dump: + class: octodns.provider.yaml.YamlProvider + directory: env/YAML_TMP_DIR + geo: + class: helpers.GeoProvider + nosshfp: + class: helpers.NoSshFpProvider + +processors: + no-class: {} + +zones: + unit.tests.: + processors: + - noop + sources: + - in + targets: + - dump diff --git a/tests/config/processors-wants-config.yaml b/tests/config/processors-wants-config.yaml new file mode 100644 index 0000000..53fc397 --- /dev/null +++ b/tests/config/processors-wants-config.yaml @@ -0,0 +1,25 @@ +providers: + config: + class: octodns.provider.yaml.YamlProvider + directory: tests/config + dump: + class: octodns.provider.yaml.YamlProvider + directory: env/YAML_TMP_DIR + geo: + class: helpers.GeoProvider + nosshfp: + class: helpers.NoSshFpProvider + +processors: + # valid class, but it wants a param and we're not passing it + wants-config: + class: helpers.WantsConfigProcessor + +zones: + unit.tests.: + processors: + - noop + sources: + - in + targets: + - dump diff --git a/tests/config/processors.yaml b/tests/config/processors.yaml new file mode 100644 index 0000000..097024b --- /dev/null +++ b/tests/config/processors.yaml @@ -0,0 +1,33 @@ +providers: + config: + class: 
octodns.provider.yaml.YamlProvider + directory: tests/config + dump: + class: octodns.provider.yaml.YamlProvider + directory: env/YAML_TMP_DIR + geo: + class: helpers.GeoProvider + nosshfp: + class: helpers.NoSshFpProvider + +processors: + # Just testing config so any processor will do + noop: + class: octodns.processor.base.BaseProcessor + +zones: + unit.tests.: + processors: + - noop + sources: + - config + targets: + - dump + + bad.unit.tests.: + processors: + - doesnt-exist + sources: + - in + targets: + - dump diff --git a/tests/config/unknown-processor.yaml b/tests/config/unknown-processor.yaml new file mode 100644 index 0000000..4aff713 --- /dev/null +++ b/tests/config/unknown-processor.yaml @@ -0,0 +1,17 @@ +manager: + max_workers: 2 +providers: + in: + class: octodns.provider.yaml.YamlProvider + directory: tests/config + dump: + class: octodns.provider.yaml.YamlProvider + directory: env/YAML_TMP_DIR +zones: + unit.tests.: + sources: + - in + processors: + - missing + targets: + - dump diff --git a/tests/helpers.py b/tests/helpers.py index ff7f7cc..eedfd8b 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -7,6 +7,10 @@ from __future__ import absolute_import, division, print_function, \ from shutil import rmtree from tempfile import mkdtemp +from logging import getLogger + +from octodns.processor.base import BaseProcessor +from octodns.provider.base import BaseProvider class SimpleSource(object): @@ -90,3 +94,29 @@ class TemporaryDirectory(object): rmtree(self.dirname) else: raise Exception(self.dirname) + + +class WantsConfigProcessor(BaseProcessor): + + def __init__(self, name, some_config): + super(WantsConfigProcessor, self).__init__(name) + + +class PlannableProvider(BaseProvider): + log = getLogger('PlannableProvider') + + SUPPORTS_GEO = False + SUPPORTS_DYNAMIC = False + SUPPORTS = set(('A',)) + + def __init__(self, *args, **kwargs): + super(PlannableProvider, self).__init__(*args, **kwargs) + + def populate(self, zone, source=False, target=False, lenient=False): + pass + + def supports(self, record): + return True + + def __repr__(self): + return self.__class__.__name__ diff --git a/tests/test_octodns_manager.py b/tests/test_octodns_manager.py index 91ee374..8bada06 100644 --- a/tests/test_octodns_manager.py +++ b/tests/test_octodns_manager.py @@ -9,9 +9,10 @@ from os import environ from os.path import dirname, join from six import text_type -from octodns.record import Record from octodns.manager import _AggregateTarget, MainThreadExecutor, Manager, \ ManagerException +from octodns.processor.base import BaseProcessor +from octodns.record import Create, Delete, Record from octodns.yaml import safe_load from octodns.zone import Zone @@ -19,7 +20,7 @@ from mock import MagicMock, patch from unittest import TestCase from helpers import DynamicProvider, GeoProvider, NoSshFpProvider, \ - SimpleProvider, TemporaryDirectory + PlannableProvider, SimpleProvider, TemporaryDirectory config_dir = join(dirname(__file__), 'config') @@ -338,6 +339,11 @@ class TestManager(TestCase): Manager(get_config_filename('simple-alias-zone.yaml')) \ .validate_configs() + with self.assertRaises(ManagerException) as ctx: + Manager(get_config_filename('unknown-processor.yaml')) \ + .validate_configs() + self.assertTrue('unknown processor' in text_type(ctx.exception)) + def test_get_zone(self): Manager(get_config_filename('simple.yaml')).get_zone('unit.tests.') @@ -358,20 +364,48 @@ class TestManager(TestCase): class NoLenient(SimpleProvider): - def populate(self, zone, source=False): + def 
populate(self, zone): pass # This should be ok, we'll fall back to not passing it - manager._populate_and_plan('unit.tests.', [NoLenient()], []) + manager._populate_and_plan('unit.tests.', [], [NoLenient()], []) + + class OtherType(SimpleProvider): + + def populate(self, zone, lenient=False): + raise TypeError('something else') + + # This will blow up, we don't fallback for source + with self.assertRaises(TypeError) as ctx: + manager._populate_and_plan('unit.tests.', [], [OtherType()], + []) + self.assertEquals('something else', text_type(ctx.exception)) + + def test_plan_processors_fallback(self): + with TemporaryDirectory() as tmpdir: + environ['YAML_TMP_DIR'] = tmpdir.dirname + # Only allow a target that doesn't exist + manager = Manager(get_config_filename('simple.yaml')) - class NoZone(SimpleProvider): + class NoProcessors(SimpleProvider): - def populate(self, lenient=False): + def plan(self, zone): pass + # This should be ok, we'll fall back to not passing it + manager._populate_and_plan('unit.tests.', [], [], + [NoProcessors()]) + + class OtherType(SimpleProvider): + + def plan(self, zone, processors): + raise TypeError('something else') + # This will blow up, we don't fallback for source - with self.assertRaises(TypeError): - manager._populate_and_plan('unit.tests.', [NoZone()], []) + with self.assertRaises(TypeError) as ctx: + manager._populate_and_plan('unit.tests.', [], [], + [OtherType()]) + self.assertEquals('something else', text_type(ctx.exception)) @patch('octodns.manager.Manager._get_named_class') def test_sync_passes_file_handle(self, mock): @@ -391,6 +425,92 @@ class TestManager(TestCase): _, kwargs = plan_output_mock.run.call_args self.assertEqual(fh_mock, kwargs.get('fh')) + def test_processor_config(self): + # Smoke test loading a valid config + manager = Manager(get_config_filename('processors.yaml')) + self.assertEquals(['noop'], list(manager.processors.keys())) + # This zone specifies a valid processor + manager.sync(['unit.tests.']) + + with self.assertRaises(ManagerException) as ctx: + # This zone specifies a non-existant processor + manager.sync(['bad.unit.tests.']) + self.assertTrue('Zone bad.unit.tests., unknown processor: ' + 'doesnt-exist' in text_type(ctx.exception)) + + with self.assertRaises(ManagerException) as ctx: + Manager(get_config_filename('processors-missing-class.yaml')) + self.assertTrue('Processor no-class is missing class' in + text_type(ctx.exception)) + + with self.assertRaises(ManagerException) as ctx: + Manager(get_config_filename('processors-wants-config.yaml')) + self.assertTrue('Incorrect processor config for wants-config' in + text_type(ctx.exception)) + + def test_processors(self): + manager = Manager(get_config_filename('simple.yaml')) + + targets = [PlannableProvider('prov')] + + zone = Zone('unit.tests.', []) + record = Record.new(zone, 'a', { + 'ttl': 30, + 'type': 'A', + 'value': '1.2.3.4', + }) + + # muck with sources + class MockProcessor(BaseProcessor): + + def process_source_zone(self, zone, sources): + zone = self._clone_zone(zone) + zone.add_record(record) + return zone + + mock = MockProcessor('mock') + plans, zone = manager._populate_and_plan('unit.tests.', [mock], [], + targets) + # Our mock was called and added the record + self.assertEquals(record, list(zone.records)[0]) + # We got a create for the thing added to the expected state (source) + self.assertIsInstance(plans[0][1].changes[0], Create) + + # muck with targets + class MockProcessor(BaseProcessor): + + def process_target_zone(self, zone, target): + zone = 
self._clone_zone(zone) + zone.add_record(record) + return zone + + mock = MockProcessor('mock') + plans, zone = manager._populate_and_plan('unit.tests.', [mock], [], + targets) + # No record added since it's target this time + self.assertFalse(zone.records) + # We got a delete for the thing added to the existing state (target) + self.assertIsInstance(plans[0][1].changes[0], Delete) + + # muck with plans + class MockProcessor(BaseProcessor): + + def process_target_zone(self, zone, target): + zone = self._clone_zone(zone) + zone.add_record(record) + return zone + + def process_plan(self, plans, sources, target): + # get rid of the change + plans.changes.pop(0) + + mock = MockProcessor('mock') + plans, zone = manager._populate_and_plan('unit.tests.', [mock], [], + targets) + # We planned a delete again, but this time removed it from the plan, so + # no plans + self.assertFalse(plans) + class TestMainThreadExecutor(TestCase): diff --git a/tests/test_octodns_processor_filter.py b/tests/test_octodns_processor_filter.py new file mode 100644 index 0000000..f421676 --- /dev/null +++ b/tests/test_octodns_processor_filter.py @@ -0,0 +1,90 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +from unittest import TestCase + +from octodns.processor.filter import TypeAllowlistFilter, TypeRejectlistFilter +from octodns.record import Record +from octodns.zone import Zone + +zone = Zone('unit.tests.', []) +for record in [ + Record.new(zone, 'a', { + 'ttl': 30, + 'type': 'A', + 'value': '1.2.3.4', + }), + Record.new(zone, 'aaaa', { + 'ttl': 30, + 'type': 'AAAA', + 'value': '::1', + }), + Record.new(zone, 'txt', { + 'ttl': 30, + 'type': 'TXT', + 'value': 'Hello World!', + }), + Record.new(zone, 'a2', { + 'ttl': 30, + 'type': 'A', + 'value': '2.3.4.5', + }), + Record.new(zone, 'txt2', { + 'ttl': 30, + 'type': 'TXT', + 'value': 'That will do', + }), +]: + zone.add_record(record) + + +class TestTypeAllowListFilter(TestCase): + + def test_basics(self): + filter_a = TypeAllowlistFilter('only-a', set(('A'))) + + got = filter_a.process_source_zone(zone) + self.assertEquals(['a', 'a2'], sorted([r.name for r in got.records])) + + filter_aaaa = TypeAllowlistFilter('only-aaaa', ('AAAA',)) + got = filter_aaaa.process_source_zone(zone) + self.assertEquals(['aaaa'], sorted([r.name for r in got.records])) + + filter_txt = TypeAllowlistFilter('only-txt', ['TXT']) + got = filter_txt.process_target_zone(zone) + self.assertEquals(['txt', 'txt2'], + sorted([r.name for r in got.records])) + + filter_a_aaaa = TypeAllowlistFilter('only-aaaa', set(('A', 'AAAA'))) + got = filter_a_aaaa.process_target_zone(zone) + self.assertEquals(['a', 'a2', 'aaaa'], + sorted([r.name for r in got.records])) + + +class TestTypeRejectListFilter(TestCase): + + def test_basics(self): + filter_a = TypeRejectlistFilter('not-a', set(('A'))) + + got = filter_a.process_source_zone(zone) + self.assertEquals(['aaaa', 'txt', 'txt2'], + sorted([r.name for r in got.records])) + + filter_aaaa = TypeRejectlistFilter('not-aaaa', ('AAAA',)) + got = filter_aaaa.process_source_zone(zone) + self.assertEquals(['a', 'a2', 'txt', 'txt2'], + sorted([r.name for r in got.records])) + + filter_txt = TypeRejectlistFilter('not-txt', ['TXT']) + got = filter_txt.process_target_zone(zone) + self.assertEquals(['a', 'a2', 'aaaa'], + sorted([r.name for r in got.records])) + + filter_a_aaaa = TypeRejectlistFilter('not-a-aaaa', set(('A', 'AAAA'))) + got = filter_a_aaaa.process_target_zone(zone) + self.assertEquals(['txt', 'txt2'], + 
sorted([r.name for r in got.records])) diff --git a/tests/test_octodns_processor_ownership.py b/tests/test_octodns_processor_ownership.py new file mode 100644 index 0000000..959f4c2 --- /dev/null +++ b/tests/test_octodns_processor_ownership.py @@ -0,0 +1,146 @@ +# +# +# + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +from unittest import TestCase + +from octodns.processor.ownership import OwnershipProcessor +from octodns.record import Delete, Record +from octodns.zone import Zone + +from helpers import PlannableProvider + + +zone = Zone('unit.tests.', []) +records = {} +for record in [ + Record.new(zone, '', { + 'ttl': 30, + 'type': 'A', + 'values': [ + '1.2.3.4', + '5.6.7.8', + ], + }), + Record.new(zone, 'the-a', { + 'ttl': 30, + 'type': 'A', + 'value': '1.2.3.4', + }), + Record.new(zone, 'the-aaaa', { + 'ttl': 30, + 'type': 'AAAA', + 'value': '::1', + }), + Record.new(zone, 'the-txt', { + 'ttl': 30, + 'type': 'TXT', + 'value': 'Hello World!', + }), + Record.new(zone, '*', { + 'ttl': 30, + 'type': 'A', + 'value': '4.3.2.1', + }), +]: + records[record.name] = record + zone.add_record(record) + + +class TestOwnershipProcessor(TestCase): + + def test_process_source_zone(self): + ownership = OwnershipProcessor('ownership') + + got = ownership.process_source_zone(zone) + self.assertEquals([ + '', + '*', + '_owner.a', + '_owner.a._wildcard', + '_owner.a.the-a', + '_owner.aaaa.the-aaaa', + '_owner.txt.the-txt', + 'the-a', + 'the-aaaa', + 'the-txt', + ], sorted([r.name for r in got.records])) + + found = False + for record in got.records: + if record.name.startswith(ownership.txt_name): + self.assertEquals([ownership.txt_value], record.values) + # test _is_ownership while we're in here + self.assertTrue(ownership._is_ownership(record)) + found = True + else: + self.assertFalse(ownership._is_ownership(record)) + self.assertTrue(found) + + def test_process_plan(self): + ownership = OwnershipProcessor('ownership') + provider = PlannableProvider('helper') + + # No plan, is a quick noop + self.assertFalse(ownership.process_plan(None)) + + # Nothing exists create both records and ownership + ownership_added = ownership.process_source_zone(zone) + plan = provider.plan(ownership_added) + self.assertTrue(plan) + # Double the number of records + self.assertEquals(len(records) * 2, len(plan.changes)) + # Now process the plan, shouldn't make any changes, we're creating + # everything + got = ownership.process_plan(plan) + self.assertTrue(got) + self.assertEquals(len(records) * 2, len(got.changes)) + + # Something extra exists and doesn't have ownership TXT, leave it + # alone, we don't own it. + extra_a = Record.new(zone, 'extra-a', { + 'ttl': 30, + 'type': 'A', + 'value': '4.4.4.4', + }) + plan.existing.add_record(extra_a) + # If we'd done a "real" plan we'd have a delete for the extra thing. + plan.changes.append(Delete(extra_a)) + # Process the plan, shouldn't make any changes since the extra bit is + # something we don't own + got = ownership.process_plan(plan) + self.assertTrue(got) + self.assertEquals(len(records) * 2, len(got.changes)) + + # Something extra exists and does have an ownership record so we will + # delete it... 
+ copy = Zone('unit.tests.', []) + for record in records.values(): + if record.name != 'the-a': + copy.add_record(record) + # New ownership, without the `the-a` + ownership_added = ownership.process_source_zone(copy) + self.assertEquals(len(records) * 2 - 2, len(ownership_added.records)) + plan = provider.plan(ownership_added) + # Fake the extra existing by adding the record, its ownership, and the + # two delete changes. + the_a = records['the-a'] + plan.existing.add_record(the_a) + name = '{}.a.the-a'.format(ownership.txt_name) + the_a_ownership = Record.new(zone, name, { + 'ttl': 30, + 'type': 'TXT', + 'value': ownership.txt_value, + }) + plan.existing.add_record(the_a_ownership) + plan.changes.append(Delete(the_a)) + plan.changes.append(Delete(the_a_ownership)) + # Finally process the plan, should be a noop and we should get the same + # plan out, meaning the planned deletes were allowed to happen. + got = ownership.process_plan(plan) + self.assertTrue(got) + self.assertEquals(plan, got) + self.assertEquals(len(plan.changes), len(got.changes)) diff --git a/tests/test_octodns_provider_base.py b/tests/test_octodns_provider_base.py index f33db0f..4dfce48 100644 --- a/tests/test_octodns_provider_base.py +++ b/tests/test_octodns_provider_base.py @@ -9,9 +9,10 @@ from logging import getLogger from six import text_type from unittest import TestCase -from octodns.record import Create, Delete, Record, Update +from octodns.processor.base import BaseProcessor from octodns.provider.base import BaseProvider from octodns.provider.plan import Plan, UnsafePlan +from octodns.record import Create, Delete, Record, Update from octodns.zone import Zone @@ -21,7 +22,7 @@ class HelperProvider(BaseProvider): SUPPORTS = set(('A',)) id = 'test' - def __init__(self, extra_changes, apply_disabled=False, + def __init__(self, extra_changes=[], apply_disabled=False, include_change_callback=None): self.__extra_changes = extra_changes self.apply_disabled = apply_disabled @@ -43,6 +44,29 @@ class HelperProvider(BaseProvider): pass +class TrickyProcessor(BaseProcessor): + + def __init__(self, name, add_during_process_target_zone): + super(TrickyProcessor, self).__init__(name) + self.add_during_process_target_zone = add_during_process_target_zone + self.reset() + + def reset(self): + self.existing = None + self.target = None + + def process_target_zone(self, existing, target): + self.existing = existing + self.target = target + + new = self._clone_zone(existing) + for record in existing.records: + new.add_record(record) + for record in self.add_during_process_target_zone: + new.add_record(record) + return new + + class TestBaseProvider(TestCase): def test_base_provider(self): @@ -138,6 +162,45 @@ class TestBaseProvider(TestCase): self.assertTrue(plan) self.assertEquals(1, len(plan.changes)) + def test_plan_with_processors(self): + zone = Zone('unit.tests.', []) + + record = Record.new(zone, 'a', { + 'ttl': 30, + 'type': 'A', + 'value': '1.2.3.4', + }) + provider = HelperProvider() + # Processor that adds a record to the zone, which planning will then + # delete since it won't know anything about it + tricky = TrickyProcessor('tricky', [record]) + plan = provider.plan(zone, processors=[tricky]) + self.assertTrue(plan) + self.assertEquals(1, len(plan.changes)) + self.assertIsInstance(plan.changes[0], Delete) + # Called processor stored its params + self.assertTrue(tricky.existing) + self.assertEquals(zone.name, tricky.existing.name) + + # Chain of processors happen one after the other + other = Record.new(zone, 'b', { + 
'ttl': 30, + 'type': 'A', + 'value': '5.6.7.8', + }) + # Another processor will add its record, thus 2 deletes + another = TrickyProcessor('tricky', [other]) + plan = provider.plan(zone, processors=[tricky, another]) + self.assertTrue(plan) + self.assertEquals(2, len(plan.changes)) + self.assertIsInstance(plan.changes[0], Delete) + self.assertIsInstance(plan.changes[1], Delete) + # 2nd processor stored its params, and we'll see the record the + # first one added + self.assertTrue(another.existing) + self.assertEquals(zone.name, another.existing.name) + self.assertEquals(1, len(another.existing.records)) + def test_apply(self): ignored = Zone('unit.tests.', [])
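
For anyone wiring this up, a minimal zone config that opts into the new `processors` key could look like the sketch below. It mirrors tests/config/processors.yaml from this change; the processor name `only-a-aaaa` is arbitrary, the provider entries are illustrative, and the `allowlist` key is simply passed through to the class constructor as a kwarg by `Manager._build_kwargs`.

```yaml
providers:
  config:
    class: octodns.provider.yaml.YamlProvider
    directory: tests/config
  dump:
    class: octodns.provider.yaml.YamlProvider
    directory: env/YAML_TMP_DIR

processors:
  # arbitrary name; any class exposing the BaseProcessor hooks will do
  only-a-aaaa:
    class: octodns.processor.filter.TypeAllowlistFilter
    allowlist:
      - A
      - AAAA

zones:
  unit.tests.:
    sources:
      - config
    processors:
      # processors are applied in the order listed here
      - only-a-aaaa
    targets:
      - dump
```

A custom processor only needs to subclass `BaseProcessor` and override the hooks it cares about (`process_source_zone`, `process_target_zone`, and/or `process_plan`). The sketch below follows the same pattern as the filters in octodns/processor/filter.py; the class itself, a hypothetical name-prefix filter, is not part of this change.

```python
from __future__ import absolute_import, division, print_function, \
    unicode_literals

from octodns.processor.base import BaseProcessor


class NamePrefixRejectFilter(BaseProcessor):
    # Hypothetical example: drop any record whose name starts with a given
    # prefix (e.g. `tmp-`) from both the desired (source) and existing
    # (target) state so octoDNS never creates or deletes such records.

    def __init__(self, name, prefix):
        super(NamePrefixRejectFilter, self).__init__(name)
        self.prefix = prefix

    def _process(self, zone, *args, **kwargs):
        # Build a fresh copy of the zone containing only the records we keep;
        # the incoming zone object is left untouched.
        ret = self._clone_zone(zone)
        for record in zone.records:
            if not record.name.startswith(self.prefix):
                ret.add_record(record)
        return ret

    # The same filtering applies to both sides of the diff
    process_source_zone = _process
    process_target_zone = _process
```

Returning the plan unchanged from `process_plan` is the default inherited from `BaseProcessor`, so a processor like this only needs the two zone hooks; anything it removes from both sides simply never shows up in the computed changes.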