diff --git a/.changelog/5f0c3606f74d45879b4326b65476c208.md b/.changelog/5f0c3606f74d45879b4326b65476c208.md new file mode 100644 index 0000000..1b697a7 --- /dev/null +++ b/.changelog/5f0c3606f74d45879b4326b65476c208.md @@ -0,0 +1,4 @@ +--- +type: none +--- +update docstring examples \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index f20ee6c..3eecc1f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,7 +1,7 @@ import sys from pathlib import Path -sys.path.insert(0, str(Path("..", "src").resolve())) +sys.path.insert(0, str(Path("..").resolve())) from octodns.__init__ import __version__ @@ -81,7 +81,7 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] ### theme ### -# tml_theme = "alabaster" +# html_theme = "alabaster" html_theme = "furo" html_theme_options = { "source_repository": "https://github.com/octodns/octodns/", diff --git a/octodns/processor/acme.py b/octodns/processor/acme.py index c8c7b1e..5b0f174 100644 --- a/octodns/processor/acme.py +++ b/octodns/processor/acme.py @@ -8,36 +8,34 @@ from .base import BaseProcessor class AcmeManagingProcessor(BaseProcessor): - log = getLogger('AcmeManagingProcessor') + log = getLogger("AcmeManagingProcessor") def __init__(self, name): - ''' - processors: - acme: - class: octodns.processor.acme.AcmeManagingProcessor - - ... - - zones: - something.com.: - ... - processors: - - acme - ... - ''' + """ + .. code-block:: yaml + + processors: + acme: + class: octodns.processor.acme.AcmeManagingProcessor + + zones: + something.com.: + processors: + - acme2 + """ super().__init__(name) self._owned = set() def process_source_zone(self, desired, *args, **kwargs): for record in desired.records: - if record._type == 'TXT' and record.name.startswith( - '_acme-challenge' + if record._type == "TXT" and record.name.startswith( + "_acme-challenge" ): # We have a managed acme challenge record (owned by octoDNS) so # we should mark it as such record = record.copy() - record.values.append('*octoDNS*') + record.values.append("*octoDNS*") record.values.sort() # This assumes we'll see things as sources before targets, # which is the case... @@ -50,12 +48,12 @@ class AcmeManagingProcessor(BaseProcessor): # Uses a startswith rather than == to ignore subdomain challenges, # e.g. _acme-challenge.foo.domain.com when managing domain.com if ( - record._type == 'TXT' - and record.name.startswith('_acme-challenge') - and '*octoDNS*' not in record.values + record._type == "TXT" + and record.name.startswith("_acme-challenge") + and "*octoDNS*" not in record.values and record not in self._owned ): - self.log.info('_process: ignoring %s', record.fqdn) + self.log.info("_process: ignoring %s", record.fqdn) existing.remove_record(record) return existing diff --git a/octodns/processor/base.py b/octodns/processor/base.py index eb584d5..ca30417 100644 --- a/octodns/processor/base.py +++ b/octodns/processor/base.py @@ -13,7 +13,7 @@ class BaseProcessor(object): self.id = self.name = name def process_source_zone(self, desired, sources): - ''' + """ Called after all sources have completed populate. Provides an opportunity for the processor to modify the desired `Zone` that targets will receive. @@ -28,11 +28,11 @@ class BaseProcessor(object): be used with `replace=True`. - May call `Zone.remove_record` to remove records from `desired`. - Sources may be empty, as will be the case for aliased zones. 
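For illustration, here is a minimal sketch of a custom processor built on this hook; the class name and the `_scratch` prefix rule are hypothetical and not part of octoDNS, but the API calls mirror the ones used by the filter and acme processors in this patch. Wiring it up is the same as the `acme` example above: list it under the zone's `processors:`.

.. code-block:: python

    from octodns.processor.base import BaseProcessor


    class DropScratchRecords(BaseProcessor):
        # Hypothetical rule: never push TXT records whose name starts with
        # "_scratch" to any target.
        def process_source_zone(self, desired, sources):
            for record in desired.records:
                if record._type == "TXT" and record.name.startswith("_scratch"):
                    # prune the record from the desired copy so targets never
                    # see it
                    desired.remove_record(record)
            return desired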
- ''' + """ return desired def process_target_zone(self, existing, target): - ''' + """ Called after a target has completed `populate`, before changes are computed between `existing` and `desired`. This provides an opportunity to modify the `existing` `Zone`. @@ -45,11 +45,11 @@ class BaseProcessor(object): the results of which can be modified, and then `Zone.add_record` may be used with `replace=True`. - May call `Zone.remove_record` to remove records from `existing`. - ''' + """ return existing def process_source_and_target_zones(self, desired, existing, target): - ''' + """ Called just prior to computing changes for `target` between `desired` and `existing`. Provides an opportunity for the processor to modify either the desired or existing `Zone`s that will be used to compute the @@ -72,11 +72,11 @@ class BaseProcessor(object): be used with `replace=True`. - May call `Zone.remove_record` to remove records from `desired`. - May call `Zone.remove_record` to remove records from `existing`. - ''' + """ return desired, existing def process_plan(self, plan, sources, target): - ''' + """ Called after the planning phase has completed. Provides an opportunity for the processors to modify the plan thus changing the actions that will be displayed and potentially applied. @@ -90,7 +90,7 @@ class BaseProcessor(object): `plan.delete_pcent_threshold` when creating a new `Plan`. - Must return a `Plan` which may be `plan` or can be a newly created one `plan.desired` and `plan.existing` copied over as-is or modified. - ''' + """ # plan may be None if no changes were detected up until now, the # process may still create a plan. # sources may be empty, as will be the case for aliased zones diff --git a/octodns/processor/filter.py b/octodns/processor/filter.py index 638823d..72cf432 100644 --- a/octodns/processor/filter.py +++ b/octodns/processor/filter.py @@ -57,59 +57,63 @@ class _TypeBaseFilter(_FilterProcessor): class TypeAllowlistFilter(_TypeBaseFilter, AllowsMixin): - '''Only manage records of the specified type(s). + """Only manage records of the specified type(s). Example usage: - processors: - only-a-and-aaaa: - class: octodns.processor.filter.TypeAllowlistFilter - allowlist: - - A - - AAAA - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - only-a-and-aaaa - targets: - - ns1 - ''' + only-a-and-aaaa: + class: octodns.processor.filter.TypeAllowlistFilter + allowlist: + - A + - AAAA + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - only-a-and-aaaa + targets: + - ns1 + """ def __init__(self, name, allowlist, **kwargs): super().__init__(name, allowlist, **kwargs) class TypeRejectlistFilter(_TypeBaseFilter, RejectsMixin): - '''Ignore records of the specified type(s). + """Ignore records of the specified type(s). Example usage: - processors: - ignore-cnames: - class: octodns.processor.filter.TypeRejectlistFilter - rejectlist: - - CNAME - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. 
code-block:: yaml + processors: - - ignore-cnames - targets: - - route53 - ''' + ignore-cnames: + class: octodns.processor.filter.TypeRejectlistFilter + rejectlist: + - CNAME + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - ignore-cnames + targets: + - route53 + """ def __init__(self, name, rejectlist, **kwargs): super().__init__(name, rejectlist, **kwargs) @@ -121,7 +125,7 @@ class _NameBaseFilter(_FilterProcessor): exact = set() regex = [] for pattern in _list: - if pattern.startswith('/'): + if pattern.startswith("/"): regex.append(re_compile(pattern[1:-1])) else: exact.add(pattern) @@ -144,72 +148,76 @@ class _NameBaseFilter(_FilterProcessor): class NameAllowlistFilter(_NameBaseFilter, AllowsMixin): - '''Only manage records with names that match the provider patterns + """Only manage records with names that match the provider patterns Example usage: - processors: - only-these: - class: octodns.processor.filter.NameAllowlistFilter - allowlist: - # exact string match - - www - # contains/substring match - - /substring/ - # regex pattern match - - /some-pattern-\\d\\+/ - # regex - anchored so has to match start to end - - /^start-.+-end$/ - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - only-these - targets: - - route53 - ''' + only-these: + class: octodns.processor.filter.NameAllowlistFilter + allowlist: + # exact string match + - www + # contains/substring match + - /substring/ + # regex pattern match + - /some-pattern-\\d\\+/ + # regex - anchored so has to match start to end + - /^start-.+-end$/ + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - only-these + targets: + - route53 + """ def __init__(self, name, allowlist): super().__init__(name, allowlist) class NameRejectlistFilter(_NameBaseFilter, RejectsMixin): - '''Reject managing records with names that match the provider patterns + """Reject managing records with names that match the provider patterns Example usage: - processors: - not-these: - class: octodns.processor.filter.NameRejectlistFilter - rejectlist: - # exact string match - - www - # contains/substring match - - /substring/ - # regex pattern match - - /some-pattern-\\d\\+/ - # regex - anchored so has to match start to end - - /^start-.+-end$/ - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. 
code-block:: yaml + processors: - - not-these - targets: - - route53 - ''' + not-these: + class: octodns.processor.filter.NameRejectlistFilter + rejectlist: + # exact string match + - www + # contains/substring match + - /substring/ + # regex pattern match + - /some-pattern-\\d\\+/ + # regex - anchored so has to match start to end + - /^start-.+-end$/ + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - not-these + targets: + - route53 + """ def __init__(self, name, rejectlist): super().__init__(name, rejectlist) @@ -221,7 +229,7 @@ class _ValueBaseFilter(_FilterProcessor): exact = set() regex = [] for pattern in _list: - if pattern.startswith('/'): + if pattern.startswith("/"): regex.append(re_compile(pattern[1:-1])) else: exact.add(pattern) @@ -231,13 +239,13 @@ class _ValueBaseFilter(_FilterProcessor): def _process(self, zone, *args, **kwargs): for record in zone.records: values = [] - if hasattr(record, 'values'): + if hasattr(record, "values"): values = [value.rdata_text for value in record.values] elif record.value is not None: values = [record.value.rdata_text] else: self.log.warning( - 'value for %s is NoneType, ignoring', record.fqdn + "value for %s is NoneType, ignoring", record.fqdn ) if any(value in self.exact for value in values): @@ -253,76 +261,80 @@ class _ValueBaseFilter(_FilterProcessor): class ValueAllowlistFilter(_ValueBaseFilter, AllowsMixin): - '''Only manage records with values that match the provider patterns + """Only manage records with values that match the provider patterns Example usage: - processors: - only-these: - class: octodns.processor.filter.ValueAllowlistFilter - allowlist: - # exact string match - - www - # contains/substring match - - /substring/ - # regex pattern match - - /some-pattern-\\d\\+/ - # regex - anchored so has to match start to end - - /^start-.+-end$/ - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. 
code-block:: yaml + processors: - - only-these - targets: - - route53 - ''' + only-these: + class: octodns.processor.filter.ValueAllowlistFilter + allowlist: + # exact string match + - www + # contains/substring match + - /substring/ + # regex pattern match + - /some-pattern-\\d\\+/ + # regex - anchored so has to match start to end + - /^start-.+-end$/ + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - only-these + targets: + - route53 + """ def __init__(self, name, allowlist): - self.log = getLogger(f'ValueAllowlistFilter[{name}]') + self.log = getLogger(f"ValueAllowlistFilter[{name}]") super().__init__(name, allowlist) class ValueRejectlistFilter(_ValueBaseFilter, RejectsMixin): - '''Reject managing records with names that match the provider patterns + """Reject managing records with names that match the provider patterns Example usage: - processors: - not-these: - class: octodns.processor.filter.ValueRejectlistFilter - rejectlist: - # exact string match - - www - # contains/substring match - - /substring/ - # regex pattern match - - /some-pattern-\\d\\+/ - # regex - anchored so has to match start to end - - /^start-.+-end$/ - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - not-these - targets: - - route53 - ''' + not-these: + class: octodns.processor.filter.ValueRejectlistFilter + rejectlist: + # exact string match + - www + # contains/substring match + - /substring/ + # regex pattern match + - /some-pattern-\\d\\+/ + # regex - anchored so has to match start to end + - /^start-.+-end$/ + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - not-these + targets: + - route53 + """ def __init__(self, name, rejectlist): - self.log = getLogger(f'ValueRejectlistFilter[{name}]') + self.log = getLogger(f"ValueRejectlistFilter[{name}]") super().__init__(name, rejectlist) @@ -334,11 +346,11 @@ class _NetworkValueBaseFilter(BaseProcessor): try: self.networks.append(ip_network(value)) except ValueError: - raise ValueError(f'{value} is not a valid CIDR to use') + raise ValueError(f"{value} is not a valid CIDR to use") def _process(self, zone, *args, **kwargs): for record in zone.records: - if record._type not in ['A', 'AAAA']: + if record._type not in ["A", "AAAA"]: continue ips = [ip_address(value) for value in record.values] @@ -356,83 +368,89 @@ class _NetworkValueBaseFilter(BaseProcessor): class NetworkValueAllowlistFilter(_NetworkValueBaseFilter, AllowsMixin): - '''Only manage A and AAAA records with values that match the provider patterns + """Only manage A and AAAA records with values that match the provider patterns All other types will be left as-is. Example usage: - processors: - only-these: - class: octodns.processor.filter.NetworkValueAllowlistFilter - allowlist: - - 127.0.0.1/32 - - 192.168.0.0/16 - - fd00::/8 - - zones: - exxampled.com.: - sources: - - config + .. 
code-block:: yaml + processors: - - only-these - targets: - - route53 - ''' + only-these: + class: octodns.processor.filter.NetworkValueAllowlistFilter + allowlist: + - 127.0.0.1/32 + - 192.168.0.0/16 + - fd00::/8 + + zones: + exxampled.com.: + sources: + - config + processors: + - only-these + targets: + - route53 + """ def __init__(self, name, allowlist): super().__init__(name, allowlist) class NetworkValueRejectlistFilter(_NetworkValueBaseFilter, RejectsMixin): - '''Reject managing A and AAAA records with value matching a that match the provider patterns + """Reject managing A and AAAA records with value matching a that match the provider patterns All other types will be left as-is. Example usage: - processors: - not-these: - class: octodns.processor.filter.NetworkValueRejectlistFilter - rejectlist: - - 127.0.0.1/32 - - 192.168.0.0/16 - - fd00::/8 - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - not-these - targets: - - route53 - ''' + not-these: + class: octodns.processor.filter.NetworkValueRejectlistFilter + rejectlist: + - 127.0.0.1/32 + - 192.168.0.0/16 + - fd00::/8 + + zones: + exxampled.com.: + sources: + - config + processors: + - not-these + targets: + - route53 + """ def __init__(self, name, rejectlist): super().__init__(name, rejectlist) class IgnoreRootNsFilter(BaseProcessor): - '''Do not manage Root NS Records. + """Do not manage Root NS Records. Example usage: - processors: - no-root-ns: - class: octodns.processor.filter.IgnoreRootNsFilter + .. code-block:: yaml - zones: - exxampled.com.: - sources: - - config processors: - - no-root-ns - targets: - - ns1 - ''' + no-root-ns: + class: octodns.processor.filter.IgnoreRootNsFilter + + zones: + exxampled.com.: + sources: + - config + processors: + - no-root-ns + targets: + - ns1 + """ def _process(self, zone, *args, **kwargs): for record in zone.records: - if record._type == 'NS' and not record.name: + if record._type == "NS" and not record.name: zone.remove_record(record) return zone @@ -442,31 +460,33 @@ class IgnoreRootNsFilter(BaseProcessor): class ExcludeRootNsChanges(BaseProcessor): - '''Do not allow root NS record changes + """Do not allow root NS record changes Example usage: - processors: - exclude-root-ns-changes: - class: octodns.processor.filter.ExcludeRootNsChanges - # If true an a change for a root NS is seen an error will be thrown. If - # false a warning will be printed and the change will be removed from - # the plan. - # (default: true) - error: true - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - exclude-root-ns-changes - targets: - - ns1 - ''' + exclude-root-ns-changes: + class: octodns.processor.filter.ExcludeRootNsChanges + # If true an a change for a root NS is seen an error will be thrown. If + # false a warning will be printed and the change will be removed from + # the plan. 
+ # (default: true) + error: true + + zones: + exxampled.com.: + sources: + - config + processors: + - exclude-root-ns-changes + targets: + - ns1 + """ def __init__(self, name, error=True): - self.log = getLogger(f'ExcludeRootNsChanges[{name}]') + self.log = getLogger(f"ExcludeRootNsChanges[{name}]") super().__init__(name) self.error = error @@ -474,14 +494,14 @@ class ExcludeRootNsChanges(BaseProcessor): if plan: for change in list(plan.changes): record = change.record - if record._type == 'NS' and record.name == '': + if record._type == "NS" and record.name == "": self.log.warning( - 'root NS changes are disallowed, fqdn=%s', record.fqdn + "root NS changes are disallowed, fqdn=%s", record.fqdn ) if self.error: raise ValidationError( record.fqdn, - ['root NS changes are disallowed'], + ["root NS changes are disallowed"], record.context, ) plan.changes.remove(change) @@ -490,30 +510,32 @@ class ExcludeRootNsChanges(BaseProcessor): class ZoneNameFilter(_FilterProcessor): - '''Filter or error on record names that contain the zone name + """Filter or error on record names that contain the zone name Example usage: - processors: - zone-name: - class: octodns.processor.filter.ZoneNameFilter - # If true a ValidationError will be throw when such records are - # encouterd, if false the records will just be ignored/omitted. - # (default: true) - # Optional param that can be set to False to leave the target zone - # alone, thus allowing deletion of existing records - # (default: true) - # include_target: True - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - zone-name - targets: - - azure - ''' + zone-name: + class: octodns.processor.filter.ZoneNameFilter + # If true a ValidationError will be throw when such records are + # encouterd, if false the records will just be ignored/omitted. + # (default: true) + # Optional param that can be set to False to leave the target zone + # alone, thus allowing deletion of existing records + # (default: true) + # include_target: True + + zones: + exxampled.com.: + sources: + - config + processors: + - zone-name + targets: + - azure + """ def __init__(self, name, error=True, **kwargs): super().__init__(name, **kwargs) @@ -530,7 +552,7 @@ class ZoneNameFilter(_FilterProcessor): if self.error: raise ValidationError( record.fqdn, - ['record name ends with zone name'], + ["record name ends with zone name"], record.context, ) else: diff --git a/octodns/processor/meta.py b/octodns/processor/meta.py index ba579d3..4a5f4ad 100644 --- a/octodns/processor/meta.py +++ b/octodns/processor/meta.py @@ -20,11 +20,11 @@ except ImportError: # pragma: no cover def _keys(values): - return set(v.split('=', 1)[0] for v in values) + return set(v.split("=", 1)[0] for v in values) class MetaProcessor(BaseProcessor): - ''' + """ Add a special metadata record with timestamps, UUIDs, versions, and/or provider name. Will only be updated when there are other changes being made. A useful tool to aid in debugging and monitoring of DNS infrastructure. @@ -39,32 +39,34 @@ class MetaProcessor(BaseProcessor): settings. Values are in the form `key=`, e.g. `time=2023-09-10T05:49:04.246953` - processors: - meta: - class: octodns.processor.meta.MetaProcessor - # The name to use for the meta record. - # (default: meta) - record_name: meta - # Include a timestamp with a UTC value indicating the timeframe when the - # last change was made. 
- # (default: true) - include_time: true - # Include a UUID that can be utilized to uniquely identify the run - # pushing data - # (default: false) - include_uuid: false - # Include the provider id for the target where data is being pushed - # (default: false) - include_provider: false - # Include the octoDNS version being used - # (default: false) - include_version: false - # Extra values to set on the records - # (default: None) - #include_extra: - # key: val - # foo: env/BAR - ''' + .. code-block:: yaml + + processors: + meta: + class: octodns.processor.meta.MetaProcessor + # The name to use for the meta record. + # (default: meta) + record_name: meta + # Include a timestamp with a UTC value indicating the timeframe when the + # last change was made. + # (default: true) + include_time: true + # Include a UUID that can be utilized to uniquely identify the run + # pushing data + # (default: false) + include_uuid: false + # Include the provider id for the target where data is being pushed + # (default: false) + include_provider: false + # Include the octoDNS version being used + # (default: false) + include_version: false + # Extra values to set on the records + # (default: None) + #include_extra: + # key: val + # foo: env/BAR + """ @classmethod def get_time(cls): @@ -77,7 +79,7 @@ class MetaProcessor(BaseProcessor): def __init__( self, id, - record_name='meta', + record_name="meta", include_time=True, include_uuid=False, include_version=False, @@ -85,10 +87,10 @@ class MetaProcessor(BaseProcessor): include_extra=None, ttl=60, ): - self.log = getLogger(f'MetaSource[{id}]') + self.log = getLogger(f"MetaSource[{id}]") super().__init__(id) self.log.info( - '__init__: record_name=%s, include_time=%s, include_uuid=%s, include_version=%s, include_provider=%s, include_extra=%s, ttl=%d', + "__init__: record_name=%s, include_time=%s, include_uuid=%s, include_version=%s, include_provider=%s, include_extra=%s, ttl=%d", record_name, include_time, include_uuid, @@ -103,7 +105,7 @@ class MetaProcessor(BaseProcessor): self.include_version = include_version self.include_provider = include_provider self.include_extra = ( - [f'{key}={val}' for key, val in include_extra.items()] + [f"{key}={val}" for key, val in include_extra.items()] if include_extra is not None else [] ) @@ -112,13 +114,13 @@ class MetaProcessor(BaseProcessor): def values(self, target_id): ret = [] if self.include_version: - ret.append(f'octodns-version={__version__}') + ret.append(f"octodns-version={__version__}") if self.include_provider: - ret.append(f'provider={target_id}') + ret.append(f"provider={target_id}") if self.time: - ret.append(f'time={self.time}') + ret.append(f"time={self.time}") if self.uuid: - ret.append(f'uuid={self.uuid}') + ret.append(f"uuid={self.uuid}") # these were previously converted into key=value or will otherwise be [] ret.extend(self.include_extra) return ret @@ -127,7 +129,7 @@ class MetaProcessor(BaseProcessor): meta = Record.new( desired, self.record_name, - {'ttl': self.ttl, 'type': 'TXT', 'values': self.values(target.id)}, + {"ttl": self.ttl, "type": "TXT", "values": self.values(target.id)}, # we may be passing in empty values here to be filled out later in # process_source_and_target_zones lenient=True, @@ -139,9 +141,9 @@ class MetaProcessor(BaseProcessor): # always something so we can see if its type and name record = change.record # existing state, if there is one - existing = getattr(change, 'existing', None) + existing = getattr(change, "existing", None) return ( - record._type == 'TXT' + 
record._type == "TXT" and record.name == self.record_name and existing is not None # don't care about the values here, just the fields/keys diff --git a/octodns/processor/restrict.py b/octodns/processor/restrict.py index c11a21b..308675e 100644 --- a/octodns/processor/restrict.py +++ b/octodns/processor/restrict.py @@ -10,7 +10,7 @@ class RestrictionException(ProcessorException): class TtlRestrictionFilter(BaseProcessor): - ''' + """ Ensure that configured TTLs are between a configured minimum and maximum or in an allowed set of values. @@ -20,34 +20,38 @@ class TtlRestrictionFilter(BaseProcessor): Example usage: - processors: - min-max-ttl: - class: octodns.processor.restrict.TtlRestrictionFilter - min_ttl: 60 - max_ttl: 3600 - # allowed_ttls: [300, 900, 3600] - - zones: - exxampled.com.: - sources: - - config + .. code-block:: yaml + processors: - - min-max-ttl - targets: - - azure + min-max-ttl: + class: octodns.processor.restrict.TtlRestrictionFilter + min_ttl: 60 + max_ttl: 3600 + # allowed_ttls: [300, 900, 3600] + + zones: + exxampled.com.: + sources: + - config + processors: + - min-max-ttl + targets: + - azure The restriction can be skipped for specific records by setting the lenient flag, e.g. - a: - octodns: - lenient: true - ttl: 0 - value: 1.2.3.4 + .. code-block:: yaml + + a: + octodns: + lenient: true + ttl: 0 + value: 1.2.3.4 The higher level lenient flags are not checked as it would make more sense to just avoid enabling the processor in those cases. - ''' + """ SEVEN_DAYS = 60 * 60 * 24 * 7 @@ -63,14 +67,14 @@ class TtlRestrictionFilter(BaseProcessor): continue if self.allowed_ttls and record.ttl not in self.allowed_ttls: raise RestrictionException( - f'{record.fqdn} ttl={record.ttl} not an allowed value, allowed_ttls={self.allowed_ttls}' + f"{record.fqdn} ttl={record.ttl} not an allowed value, allowed_ttls={self.allowed_ttls}" ) elif record.ttl < self.min_ttl: raise RestrictionException( - f'{record.fqdn} ttl={record.ttl} too low, min_ttl={self.min_ttl}' + f"{record.fqdn} ttl={record.ttl} too low, min_ttl={self.min_ttl}" ) elif record.ttl > self.max_ttl: raise RestrictionException( - f'{record.fqdn} ttl={record.ttl} too high, max_ttl={self.max_ttl}' + f"{record.fqdn} ttl={record.ttl} too high, max_ttl={self.max_ttl}" ) return zone diff --git a/octodns/processor/spf.py b/octodns/processor/spf.py index f74fc44..02a29f8 100644 --- a/octodns/processor/spf.py +++ b/octodns/processor/spf.py @@ -22,36 +22,38 @@ class SpfDnsLookupException(ProcessorException): class SpfDnsLookupProcessor(BaseProcessor): - ''' + """ Validate that SPF values in TXT records are valid. Example usage: - processors: - spf: - class: octodns.processor.spf.SpfDnsLookupProcessor + .. code-block:: yaml - zones: - example.com.: - sources: - - config processors: - - spf - targets: - - route53 - - The validation can be skipped for specific records by setting the lenient - flag, e.g. - - _spf: - octodns: - lenient: true - ttl: 86400 - type: TXT - value: v=spf1 ptr ~all - ''' - - log = getLogger('SpfDnsLookupProcessor') + spf: + class: octodns.processor.spf.SpfDnsLookupProcessor + + zones: + example.com.: + sources: + - config + processors: + - spf + targets: + - route53 + + The validation can be skipped for specific records by setting the lenient + flag, e.g. 
+ + _spf: + octodns: + lenient: true + ttl: 86400 + type: TXT + value: v=spf1 ptr ~all + """ + + log = getLogger("SpfDnsLookupProcessor") def __init__(self, name): self.log.debug(f"SpfDnsLookupProcessor: {name}") @@ -65,7 +67,7 @@ class SpfDnsLookupProcessor(BaseProcessor): ) # SPF values to validate will begin with 'v=spf1 ' - spf = [value for value in values if value.startswith('v=spf1 ')] + spf = [value for value in values if value.startswith("v=spf1 ")] # No SPF values in the TXT record if len(spf) == 0: @@ -84,7 +86,7 @@ class SpfDnsLookupProcessor(BaseProcessor): for value in answer: text_value = value.to_text() - processed_value = text_value[1:-1].replace('" "', '') + processed_value = text_value[1:-1].replace('" "', "") values.append(processed_value) return values @@ -101,7 +103,7 @@ class SpfDnsLookupProcessor(BaseProcessor): if spf is None: return lookups - terms = spf[len('v=spf1 ') :].split(' ') + terms = spf[len("v=spf1 ") :].split(" ") for term in terms: if lookups > 10: @@ -109,19 +111,19 @@ class SpfDnsLookupProcessor(BaseProcessor): f"{record.fqdn} exceeds the 10 DNS lookup limit in the SPF record" ) - if term.startswith('ptr'): + if term.startswith("ptr"): raise SpfValueException( f"{record.fqdn} uses the deprecated ptr mechanism" ) # These mechanisms cost one DNS lookup each - if term.startswith(('a', 'mx', 'exists:', 'redirect', 'include:')): + if term.startswith(("a", "mx", "exists:", "redirect", "include:")): lookups += 1 # The include mechanism can result in further lookups after resolving the DNS record - if term.startswith('include:'): - domain = term[len('include:') :] - answer = dns.resolver.resolve(domain, 'TXT') + if term.startswith("include:"): + domain = term[len("include:") :] + answer = dns.resolver.resolve(domain, "TXT") answer_values = self._process_answer(answer) lookups = self._check_dns_lookups( record, answer_values, lookups @@ -131,7 +133,7 @@ class SpfDnsLookupProcessor(BaseProcessor): def process_source_zone(self, zone, *args, **kwargs): for record in zone.records: - if record._type != 'TXT': + if record._type != "TXT": continue if record.lenient: diff --git a/octodns/provider/yaml.py b/octodns/provider/yaml.py index 1a2a512..7054226 100644 --- a/octodns/provider/yaml.py +++ b/octodns/provider/yaml.py @@ -15,69 +15,70 @@ from .base import BaseProvider class YamlProvider(BaseProvider): - ''' + """ Core provider for records configured in yaml files on disk. - config: - class: octodns.provider.yaml.YamlProvider - - # The location of yaml config files. By default records are defined in a - # file named for the zone in this directory, the zone file, e.g. - # something.com.yaml. - # (required) - directory: ./config - - # The ttl to use for records when not specified in the data - # (optional, default 3600) - default_ttl: 3600 - - # Whether or not to enforce sorting order when loading yaml - # (optional, default True) - enforce_order: true - # What sort mode to employ when enforcing order - # - simple: `sort` - # - natural: https://pypi.org/project/natsort/ - # (optional, default natural) - order_mode: natural - - # Whether duplicate records should replace rather than error - # (optional, default False) - populate_should_replace: false - - # The file extension used when loading split style zones, Null means - # disabled. When enabled the provider will search for zone records split - # across multiple YAML files in the directory with split_extension - # appended to the zone name, See "Split Details" below. - # split_extension should include the "." 
- # (optional, default null, "." is the recommended best practice when - # enabling) - split_extension: null - - # When writing YAML records out to disk with split_extension enabled - # each record is written out into its own file with .yaml appended to - # the name of the record. The two exceptions are for the root and - # wildcard nodes. These records are written into a file named - # `$[zone.name].yaml`. If you would prefer this catchall file not be - # used `split_catchall` can be set to False to instead write those - # records out to `.yaml` and `*.yaml` respectively. Note that some - # operating systems may not allow files with those names. - # (optional, default True) - split_catchall: true - - # Optional filename with record data to be included in all zones - # populated by this provider. Has no effect when used as a target. - # (optional, default null) - shared_filename: null - - # Disable loading of the zone .yaml files. - # (optional, default False) - disable_zonefile: false - - Note - ---- - - When using this provider as a target any existing comments or formatting - in the zone files will be lost when changes are applyed. + .. code-block:: yaml + + config: + class: octodns.provider.yaml.YamlProvider + + # The location of yaml config files. By default records are defined in a + # file named for the zone in this directory, the zone file, e.g. + # something.com.yaml. + # (required) + directory: ./config + + # The ttl to use for records when not specified in the data + # (optional, default 3600) + default_ttl: 3600 + + # Whether or not to enforce sorting order when loading yaml + # (optional, default True) + enforce_order: true + # What sort mode to employ when enforcing order + # - simple: `sort` + # - natural: https://pypi.org/project/natsort/ + # (optional, default natural) + order_mode: natural + + # Whether duplicate records should replace rather than error + # (optional, default False) + populate_should_replace: false + + # The file extension used when loading split style zones, Null means + # disabled. When enabled the provider will search for zone records split + # across multiple YAML files in the directory with split_extension + # appended to the zone name, See "Split Details" below. + # split_extension should include the "." + # (optional, default null, "." is the recommended best practice when + # enabling) + split_extension: null + + # When writing YAML records out to disk with split_extension enabled + # each record is written out into its own file with .yaml appended to + # the name of the record. The two exceptions are for the root and + # wildcard nodes. These records are written into a file named + # `$[zone.name].yaml`. If you would prefer this catchall file not be + # used `split_catchall` can be set to False to instead write those + # records out to `.yaml` and `*.yaml` respectively. Note that some + # operating systems may not allow files with those names. + # (optional, default True) + split_catchall: true + + # Optional filename with record data to be included in all zones + # populated by this provider. Has no effect when used as a target. + # (optional, default null) + shared_filename: null + + # Disable loading of the zone .yaml files. + # (optional, default False) + disable_zonefile: false + + .. warning:: + + When using this provider as a target any existing comments or formatting + in the zone files will be lost when changes are applyed. 
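If it helps to see the same options exercised from Python rather than YAML, a small sketch follows; the `./config` directory is a placeholder and the snippet only uses the constructor arguments and `list_zones` shown in this file.

.. code-block:: python

    from octodns.provider.yaml import YamlProvider

    # "./config" is a placeholder directory holding one <zone name>.yaml
    # file per zone
    provider = YamlProvider("config", directory="./config", default_ttl=3600)

    # enumerate the zones the provider discovers on disk
    for zone_name in provider.list_zones():
        print(zone_name)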
Split Details ------------- @@ -91,11 +92,12 @@ class YamlProvider(BaseProvider): With `split_extension: .` the directory structure for the zone github.com. managed under directory "zones/" would look like: - zones/ - github.com./ - $github.com.yaml - www.yaml - ... + .. code-block:: yaml + zones/ + github.com./ + $github.com.yaml + www.yaml + ... Overriding Values ----------------- @@ -106,68 +108,70 @@ class YamlProvider(BaseProvider): to external DNS providers and internally, but you want to modify some of the records in the internal version. - config/octodns.com.yaml - --- - other: - type: A - values: - - 192.30.252.115 - - 192.30.252.116 - www: - type: A - values: - - 192.30.252.113 - - 192.30.252.114 - - - internal/octodns.com.yaml - --- - 'www': - type: A - values: - - 10.0.0.12 - - 10.0.0.13 - - external.yaml - --- - providers: - config: - class: octodns.provider.yaml.YamlProvider - directory: ./config - - zones: - - octodns.com.: - sources: - - config - targets: - - route53 - - internal.yaml - --- - providers: - config: - class: octodns.provider.yaml.YamlProvider - directory: ./config - - internal: - class: octodns.provider.yaml.YamlProvider - directory: ./internal - populate_should_replace: true - - zones: - - octodns.com.: - sources: - - config - - internal - targets: - - pdns + `config/octodns.com.yaml` + .. code-block:: yaml + --- + other: + type: A + values: + - 192.30.252.115 + - 192.30.252.116 + www: + type: A + values: + - 192.30.252.113 + - 192.30.252.114 + + + `internal/octodns.com.yaml` + .. code-block:: yaml + --- + 'www': + type: A + values: + - 10.0.0.12 + - 10.0.0.13 + + external.yaml + --- + providers: + config: + class: octodns.provider.yaml.YamlProvider + directory: ./config + + zones: + + octodns.com.: + sources: + - config + targets: + - route53 + + internal.yaml + --- + providers: + config: + class: octodns.provider.yaml.YamlProvider + directory: ./config + + internal: + class: octodns.provider.yaml.YamlProvider + directory: ./internal + populate_should_replace: true + + zones: + + octodns.com.: + sources: + - config + - internal + targets: + - pdns You can then sync our records eternally with `--config-file=external.yaml` and internally (with the custom overrides) with `--config-file=internal.yaml` - ''' + """ SUPPORTS_GEO = True SUPPORTS_DYNAMIC = True @@ -177,7 +181,7 @@ class YamlProvider(BaseProvider): # Any record name added to this set will be included in the catch-all file, # instead of a file matching the record name. 
- CATCHALL_RECORD_NAMES = ('*', '') + CATCHALL_RECORD_NAMES = ("*", "") def __init__( self, @@ -185,7 +189,7 @@ class YamlProvider(BaseProvider): directory, default_ttl=3600, enforce_order=True, - order_mode='natural', + order_mode="natural", populate_should_replace=False, supports_root_ns=True, split_extension=False, @@ -196,9 +200,9 @@ class YamlProvider(BaseProvider): **kwargs, ): klass = self.__class__.__name__ - self.log = logging.getLogger(f'{klass}[{id}]') + self.log = logging.getLogger(f"{klass}[{id}]") self.log.debug( - '__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, order_mode=%s, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, shared_filename=%s, disable_zonefile=%s', + "__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, order_mode=%s, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, shared_filename=%s, disable_zonefile=%s", id, directory, default_ttl, @@ -225,8 +229,8 @@ class YamlProvider(BaseProvider): def copy(self): kwargs = dict(self.__dict__) - kwargs['id'] = f'{kwargs["id"]}-copy' - del kwargs['log'] + kwargs["id"] = f"{kwargs['id']}-copy" + del kwargs["log"] return YamlProvider(**kwargs) @property @@ -250,7 +254,7 @@ class YamlProvider(BaseProvider): return self.supports_root_ns def list_zones(self): - self.log.debug('list_zones:') + self.log.debug("list_zones:") zones = set() extension = self.split_extension @@ -258,7 +262,7 @@ class YamlProvider(BaseProvider): # we want to leave the . trim = len(extension) - 1 self.log.debug( - 'list_zones: looking for split zones, trim=%d', trim + "list_zones: looking for split zones, trim=%d", trim ) for dirname in listdir(self.directory): not_ends_with = not dirname.endswith(extension) @@ -270,10 +274,10 @@ class YamlProvider(BaseProvider): zones.add(dirname) if not self.disable_zonefile: - self.log.debug('list_zones: looking for zone files') + self.log.debug("list_zones: looking for zone files") for filename in listdir(self.directory): - not_ends_with = not filename.endswith('.yaml') - too_few_dots = filename.count('.') < 2 + not_ends_with = not filename.endswith(".yaml") + too_few_dots = filename.count(".") < 2 not_file = not isfile(join(self.directory, filename)) if not_file or not_ends_with or too_few_dots: continue @@ -284,8 +288,8 @@ class YamlProvider(BaseProvider): def _split_sources(self, zone): ext = self.split_extension - utf8 = join(self.directory, f'{zone.decoded_name[:-1]}{ext}') - idna = join(self.directory, f'{zone.name[:-1]}{ext}') + utf8 = join(self.directory, f"{zone.decoded_name[:-1]}{ext}") + idna = join(self.directory, f"{zone.name[:-1]}{ext}") directory = None if isdir(utf8): if utf8 != idna and isdir(idna): @@ -299,12 +303,12 @@ class YamlProvider(BaseProvider): return [] for filename in listdir(directory): - if filename.endswith('.yaml'): + if filename.endswith(".yaml"): yield join(directory, filename) def _zone_sources(self, zone): - utf8 = join(self.directory, f'{zone.decoded_name}yaml') - idna = join(self.directory, f'{zone.name}yaml') + utf8 = join(self.directory, f"{zone.decoded_name}yaml") + idna = join(self.directory, f"{zone.name}yaml") if isfile(utf8): if utf8 != idna and isfile(idna): raise ProviderException( @@ -317,7 +321,7 @@ class YamlProvider(BaseProvider): return None def _populate_from_file(self, filename, zone, lenient): - with open(filename, 'r') as fh: + with open(filename, "r") as fh: yaml_data = safe_load( fh, enforce_order=self.enforce_order, order_mode=self.order_mode ) @@ 
-326,8 +330,8 @@ class YamlProvider(BaseProvider): if not isinstance(data, list): data = [data] for d in data: - if 'ttl' not in d: - d['ttl'] = self.default_ttl + if "ttl" not in d: + d["ttl"] = self.default_ttl record = Record.new( zone, name, d, source=self, lenient=lenient ) @@ -342,7 +346,7 @@ class YamlProvider(BaseProvider): def populate(self, zone, target=False, lenient=False): self.log.debug( - 'populate: name=%s, target=%s, lenient=%s', + "populate: name=%s, target=%s, lenient=%s", zone.decoded_name, target, lenient, @@ -365,7 +369,7 @@ class YamlProvider(BaseProvider): sources.append(join(self.directory, self.shared_filename)) if not sources and not target: - raise ProviderException(f'no YAMLs found for {zone.decoded_name}') + raise ProviderException(f"no YAMLs found for {zone.decoded_name}") # deterministically order our sources sources.sort() @@ -375,7 +379,7 @@ class YamlProvider(BaseProvider): exists = len(sources) > 0 self.log.info( - 'populate: found %s records, exists=%s', + "populate: found %s records, exists=%s", len(zone.records) - before, exists, ) @@ -386,7 +390,7 @@ class YamlProvider(BaseProvider): copy = plan.existing.copy() changes = plan.changes self.log.debug( - '_apply: zone=%s, len(changes)=%d', copy.decoded_name, len(changes) + "_apply: zone=%s, len(changes)=%d", copy.decoded_name, len(changes) ) # apply our pending changes to that copy @@ -398,10 +402,10 @@ class YamlProvider(BaseProvider): data = defaultdict(list) for record in records: d = record.data - d['type'] = record._type + d["type"] = record._type if record.ttl == self.default_ttl: # ttl is the default, we don't need to store it - del d['ttl'] + del d["ttl"] # we want to output the utf-8 version of the name data[record.decoded_name].append(d) @@ -411,18 +415,18 @@ class YamlProvider(BaseProvider): data[k] = data[k][0] if not isdir(self.directory): - self.log.debug('_apply: creating directory=%s', self.directory) + self.log.debug("_apply: creating directory=%s", self.directory) makedirs(self.directory) if self.split_extension: # we're going to do split files decoded_name = copy.decoded_name[:-1] directory = join( - self.directory, f'{decoded_name}{self.split_extension}' + self.directory, f"{decoded_name}{self.split_extension}" ) if not isdir(directory): - self.log.debug('_apply: creating split directory=%s', directory) + self.log.debug("_apply: creating split directory=%s", directory) makedirs(directory) catchall = {} @@ -430,27 +434,27 @@ class YamlProvider(BaseProvider): if self.split_catchall and record in self.CATCHALL_RECORD_NAMES: catchall[record] = config continue - filename = join(directory, f'{record}.yaml') - self.log.debug('_apply: writing filename=%s', filename) + filename = join(directory, f"{record}.yaml") + self.log.debug("_apply: writing filename=%s", filename) - with open(filename, 'w') as fh: + with open(filename, "w") as fh: record_data = {record: config} safe_dump(record_data, fh, order_mode=self.order_mode) if catchall: # Scrub the trailing . to make filenames more sane. 
- filename = join(directory, f'${decoded_name}.yaml') + filename = join(directory, f"${decoded_name}.yaml") self.log.debug( - '_apply: writing catchall filename=%s', filename + "_apply: writing catchall filename=%s", filename ) - with open(filename, 'w') as fh: + with open(filename, "w") as fh: safe_dump(catchall, fh, order_mode=self.order_mode) else: # single large file - filename = join(self.directory, f'{copy.decoded_name}yaml') - self.log.debug('_apply: writing filename=%s', filename) - with open(filename, 'w') as fh: + filename = join(self.directory, f"{copy.decoded_name}yaml") + self.log.debug("_apply: writing filename=%s", filename) + with open(filename, "w") as fh: safe_dump( dict(data), fh, @@ -460,12 +464,15 @@ class YamlProvider(BaseProvider): class SplitYamlProvider(YamlProvider): - ''' - DEPRECATED: Use YamlProvider with the split_extension parameter instead. + """ + .. deprecated:: + DEPRECATED: Use YamlProvider with the split_extension parameter instead. When migrating the following configuration options would result in the same behavior as SplitYamlProvider + .. code-block:: yaml + config: class: octodns.provider.yaml.YamlProvider # extension is configured as split_extension @@ -474,18 +481,18 @@ class SplitYamlProvider(YamlProvider): disable_zonefile: true TO BE REMOVED: 2.0 - ''' + """ - def __init__(self, id, directory, *args, extension='.', **kwargs): + def __init__(self, id, directory, *args, extension=".", **kwargs): kwargs.update( { - 'split_extension': extension, - 'split_catchall': True, - 'disable_zonefile': True, + "split_extension": extension, + "split_catchall": True, + "disable_zonefile": True, } ) super().__init__(id, directory, *args, **kwargs) deprecated( - 'SplitYamlProvider is DEPRECATED, use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0', + "SplitYamlProvider is DEPRECATED, use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0", stacklevel=99, ) diff --git a/octodns/source/base.py b/octodns/source/base.py index 0f0b2dd..63daa70 100644 --- a/octodns/source/base.py +++ b/octodns/source/base.py @@ -11,17 +11,17 @@ class BaseSource(object): def __init__(self, id): self.id = id - if not getattr(self, 'log', False): + if not getattr(self, "log", False): raise NotImplementedError( - 'Abstract base class, log property missing' + "Abstract base class, log property missing" ) - if not hasattr(self, 'SUPPORTS_GEO'): + if not hasattr(self, "SUPPORTS_GEO"): raise NotImplementedError( - 'Abstract base class, SUPPORTS_GEO property missing' + "Abstract base class, SUPPORTS_GEO property missing" ) - if not hasattr(self, 'SUPPORTS'): + if not hasattr(self, "SUPPORTS"): raise NotImplementedError( - 'Abstract base class, SUPPORTS property missing' + "Abstract base class, SUPPORTS property missing" ) @property @@ -29,22 +29,22 @@ class BaseSource(object): return False def populate(self, zone, target=False, lenient=False): - ''' + """ Loads all records the provider knows about for the provided zone - When `target` is True the populate call is being made to load the + When `target` is `True` the populate call is being made to load the current state of the provider. - When `lenient` is True the populate call may skip record validation and + When `lenient` is `True` the populate call may skip record validation and do a "best effort" load of data. That will allow through some common, but not best practices stuff that we otherwise would reject. E.g. no trailing . 
or missing escapes for ;. - When target is True (loading current state) this method should return - True if the zone exists or False if it does not. - ''' + When target is `True` (loading current state) this method should return + `True` if the zone exists or False if it does not. + """ raise NotImplementedError( - 'Abstract base class, populate method missing' + "Abstract base class, populate method missing" ) def supports(self, record): diff --git a/octodns/source/envvar.py b/octodns/source/envvar.py index 6ca80df..498cce3 100644 --- a/octodns/source/envvar.py +++ b/octodns/source/envvar.py @@ -11,11 +11,11 @@ class EnvVarSourceException(Exception): class EnvironmentVariableNotFoundException(EnvVarSourceException): def __init__(self, data): - super().__init__(f'Unknown environment variable {data}') + super().__init__(f"Unknown environment variable {data}") class EnvVarSource(BaseSource): - ''' + """ This source allows for environment variables to be embedded at octodns execution time into zones. Intended to capture artifacts of deployment to facilitate operational objectives. @@ -31,41 +31,45 @@ class EnvVarSource(BaseSource): - Capturing identifying information about the deployment process to record where and when the zone was updated. - version: - class: octodns.source.envvar.EnvVarSource - # The environment variable in question, in this example the username - # currently executing octodns - variable: USER - # The TXT record name to embed the value found at the above - # environment variable - name: deployuser - # The TTL of the TXT record (optional, default 60) - ttl: 3600 + .. code-block:: yaml + + version: + class: octodns.source.envvar.EnvVarSource + # The environment variable in question, in this example the username + # currently executing octodns + variable: USER + # The TXT record name to embed the value found at the above + # environment variable + name: deployuser + # The TTL of the TXT record (optional, default 60) + ttl: 3600 This source is then combined with other sources in the octodns config file: - zones: - netflix.com.: - sources: - - yaml - - version - targets: - - ultra - - ns1 - ''' + .. code-block:: yaml + + zones: + netflix.com.: + sources: + - yaml + - version + targets: + - ultra + - ns1 + """ SUPPORTS_GEO = False SUPPORTS_DYNAMIC = False - SUPPORTS = set(('TXT')) + SUPPORTS = set(("TXT")) DEFAULT_TTL = 60 def __init__(self, id, variable, name, ttl=DEFAULT_TTL): klass = self.__class__.__name__ - self.log = logging.getLogger(f'{klass}[{id}]') + self.log = logging.getLogger(f"{klass}[{id}]") self.log.debug( - '__init__: id=%s, variable=%s, name=%s, ttl=%d', + "__init__: id=%s, variable=%s, name=%s, ttl=%d", id, variable, name, @@ -82,7 +86,7 @@ class EnvVarSource(BaseSource): raise EnvironmentVariableNotFoundException(self.envvar) self.log.debug( - '_read_variable: successfully loaded var=%s val=%s', + "_read_variable: successfully loaded var=%s val=%s", self.envvar, value, ) @@ -90,7 +94,7 @@ class EnvVarSource(BaseSource): def populate(self, zone, target=False, lenient=False): self.log.debug( - 'populate: name=%s, target=%s, lenient=%s', + "populate: name=%s, target=%s, lenient=%s", zone.name, target, lenient, @@ -102,13 +106,13 @@ class EnvVarSource(BaseSource): # We don't need to worry about conflicting records here because the # manager will deconflict sources on our behalf. 
- payload = {'ttl': self.ttl, 'type': 'TXT', 'values': [value]} + payload = {"ttl": self.ttl, "type": "TXT", "values": [value]} record = Record.new( zone, self.name, payload, source=self, lenient=lenient ) zone.add_record(record, lenient=lenient) self.log.info( - 'populate: found %s records, exists=False', + "populate: found %s records, exists=False", len(zone.records) - before, ) diff --git a/octodns/source/tinydns.py b/octodns/source/tinydns.py index 1faf2f7..caa3aad 100755 --- a/octodns/source/tinydns.py +++ b/octodns/source/tinydns.py @@ -55,7 +55,7 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('MX', name): + if not zone.owns("MX", name): # if name doesn't live under our zone there's nothing for us to do return @@ -65,11 +65,11 @@ class TinyDnsBaseSource(BaseSource): for line in lines: mx = line[2] # if there's a . in the mx we hit a special case and use it as-is - if '.' not in mx: + if "." not in mx: # otherwise we treat it as the MX hostnam and construct the rest - mx = f'{mx}.mx.{zone.name}' - elif mx[-1] != '.': - mx = f'{mx}.' + mx = f"{mx}.mx.{zone.name}" + elif mx[-1] != ".": + mx = f"{mx}." # default distance is 0 try: @@ -79,12 +79,12 @@ class TinyDnsBaseSource(BaseSource): # if we have an IP then we need to create an A for the MX ip = line[1] - if ip and zone.owns('A', mx): - yield 'A', mx, ttl, [ip] + if ip and zone.owns("A", mx): + yield "A", mx, ttl, [ip] - values.append({'preference': dist, 'exchange': mx}) + values.append({"preference": dist, "exchange": mx}) - yield 'MX', name, ttl, values + yield "MX", name, ttl, values def _records_for_C(self, zone, name, lines, arpa=False): # Cfqdn:p:ttl:timestamp:lo @@ -94,17 +94,17 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('CNAME', name): + if not zone.owns("CNAME", name): # if name doesn't live under our zone there's nothing for us to do return value = lines[0][1] - if value[-1] != '.': - value = f'{value}.' + if value[-1] != ".": + value = f"{value}." ttl = self._ttl_for(lines, 2) - yield 'CNAME', name, ttl, [value] + yield "CNAME", name, ttl, [value] def _records_for_caret(self, zone, name, lines, arpa=False): # ^fqdn:p:ttl:timestamp:lo @@ -116,8 +116,8 @@ class TinyDnsBaseSource(BaseSource): names = defaultdict(list) for line in lines: - if line[0].endswith('in-addr.arpa') or line[0].endswith( - 'ip6.arpa.' + if line[0].endswith("in-addr.arpa") or line[0].endswith( + "ip6.arpa." ): # it's a straight PTR record, already in in-addr.arpa format, # 2nd item is the name it points to @@ -128,20 +128,20 @@ class TinyDnsBaseSource(BaseSource): # we're given value = line[0] addr = line[1] - if '.' not in addr: - addr = u':'.join(textwrap.wrap(line[1], 4)) + if "." not in addr: + addr = ":".join(textwrap.wrap(line[1], 4)) addr = ip_address(addr) name = addr.reverse_pointer - if value[-1] != '.': - value = f'{value}.' + if value[-1] != ".": + value = f"{value}." 
names[name].append(value) ttl = self._ttl_for(lines, 2) for name, values in names.items(): - if zone.owns('PTR', name): - yield 'PTR', name, ttl, values + if zone.owns("PTR", name): + yield "PTR", name, ttl, values def _records_for_equal(self, zone, name, lines, arpa=False): # =fqdn:ip:ttl:timestamp:lo @@ -159,7 +159,7 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('NS', name): + if not zone.owns("NS", name): # if name doesn't live under our zone there's nothing for us to do return @@ -169,20 +169,20 @@ class TinyDnsBaseSource(BaseSource): for line in lines: ns = line[2] # if there's a . in the ns we hit a special case and use it as-is - if '.' not in ns: + if "." not in ns: # otherwise we treat it as the NS hostnam and construct the rest - ns = f'{ns}.ns.{zone.name}' - elif ns[-1] != '.': - ns = f'{ns}.' + ns = f"{ns}.ns.{zone.name}" + elif ns[-1] != ".": + ns = f"{ns}." # if we have an IP then we need to create an A for the MX ip = line[1] - if ip and zone.owns('A', ns): - yield 'A', ns, ttl, [ip] + if ip and zone.owns("A", ns): + yield "A", ns, ttl, [ip] values.append(ns) - yield 'NS', name, ttl, values + yield "NS", name, ttl, values _records_for_amp = _records_for_dot @@ -194,12 +194,12 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('A', name): + if not zone.owns("A", name): # if name doesn't live under our zone there's nothing for us to do return # collect our ip(s) - ips = [l[1] for l in lines if l[1] != '0.0.0.0'] + ips = [l[1] for l in lines if l[1] != "0.0.0.0"] if not ips: # we didn't find any value ips so nothing to do @@ -207,7 +207,7 @@ class TinyDnsBaseSource(BaseSource): ttl = self._ttl_for(lines, 2) - yield 'A', name, ttl, ips + yield "A", name, ttl, ips def _records_for_quote(self, zone, name, lines, arpa=False): # 'fqdn:s:ttl:timestamp:lo @@ -217,19 +217,19 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('TXT', name): + if not zone.owns("TXT", name): # if name doesn't live under our zone there's nothing for us to do return # collect our ip(s) values = [ - l[1].encode('latin1').decode('unicode-escape').replace(";", "\\;") + l[1].encode("latin1").decode("unicode-escape").replace(";", "\\;") for l in lines ] ttl = self._ttl_for(lines, 2) - yield 'TXT', name, ttl, values + yield "TXT", name, ttl, values def _records_for_three(self, zone, name, lines, arpa=False): # 3fqdn:ip:ttl:timestamp:lo @@ -239,7 +239,7 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('AAAA', name): + if not zone.owns("AAAA", name): # if name doesn't live under our zone there's nothing for us to do return @@ -249,11 +249,11 @@ class TinyDnsBaseSource(BaseSource): # TinyDNS files have the ipv6 address written in full, but with the # colons removed. This inserts a colon every 4th character to make # the address correct. - ips.append(u':'.join(textwrap.wrap(line[1], 4))) + ips.append(":".join(textwrap.wrap(line[1], 4))) ttl = self._ttl_for(lines, 2) - yield 'AAAA', name, ttl, ips + yield "AAAA", name, ttl, ips def _records_for_S(self, zone, name, lines, arpa=False): # Sfqdn:ip:x:port:priority:weight:ttl:timestamp:lo @@ -263,7 +263,7 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('SRV', name): + if not zone.owns("SRV", name): # if name doesn't live under our zone there's nothing for us to do return @@ -273,17 +273,17 @@ class TinyDnsBaseSource(BaseSource): for line in lines: target = line[2] # if there's a . 
in the mx we hit a special case and use it as-is - if '.' not in target: + if "." not in target: # otherwise we treat it as the MX hostnam and construct the rest - target = f'{target}.srv.{zone.name}' - elif target[-1] != '.': - target = f'{target}.' + target = f"{target}.srv.{zone.name}" + elif target[-1] != ".": + target = f"{target}." # if we have an IP then we need to create an A for the SRV # has to be present, but can be empty ip = line[1] - if ip and zone.owns('A', target): - yield 'A', target, ttl, [ip] + if ip and zone.owns("A", target): + yield "A", target, ttl, [ip] # required port = int(line[3]) @@ -302,14 +302,14 @@ class TinyDnsBaseSource(BaseSource): values.append( { - 'priority': priority, - 'weight': weight, - 'port': port, - 'target': target, + "priority": priority, + "weight": weight, + "port": port, + "target": target, } ) - yield 'SRV', name, ttl, values + yield "SRV", name, ttl, values def _records_for_colon(self, zone, name, lines, arpa=False): # :fqdn:n:rdata:ttl:timestamp:lo @@ -319,7 +319,7 @@ class TinyDnsBaseSource(BaseSource): # no arpa return [] - if not zone.owns('SRV', name): + if not zone.owns("SRV", name): # if name doesn't live under our zone there's nothing for us to do return @@ -333,7 +333,7 @@ class TinyDnsBaseSource(BaseSource): _class = classes.get(_type, None) if not _class: self.log.info( - '_records_for_colon: unrecognized type %s, %s', _type, line + "_records_for_colon: unrecognized type %s, %s", _type, line ) continue @@ -351,18 +351,18 @@ class TinyDnsBaseSource(BaseSource): yield from self._records_for_three(zone, name, lines, arpa) SYMBOL_MAP = { - '=': _records_for_equal, # A - '^': _records_for_caret, # PTR - '.': _records_for_dot, # NS - 'C': _records_for_C, # CNAME - '+': _records_for_plus, # A - '@': _records_for_at, # MX - '&': _records_for_amp, # NS - '\'': _records_for_quote, # TXT - '3': _records_for_three, # AAAA - 'S': _records_for_S, # SRV - ':': _records_for_colon, # arbitrary - '6': _records_for_six, # AAAA + "=": _records_for_equal, # A + "^": _records_for_caret, # PTR + ".": _records_for_dot, # NS + "C": _records_for_C, # CNAME + "+": _records_for_plus, # A + "@": _records_for_at, # MX + "&": _records_for_amp, # NS + "'": _records_for_quote, # TXT + "3": _records_for_three, # AAAA + "S": _records_for_S, # SRV + ":": _records_for_colon, # arbitrary + "6": _records_for_six, # AAAA } def _process_lines(self, zone, lines): @@ -371,9 +371,9 @@ class TinyDnsBaseSource(BaseSource): symbol = line[0] # Skip type, remove trailing comments, and omit newline - line = line[1:].split('#', 1)[0] + line = line[1:].split("#", 1)[0] # Split on :'s including :: and strip leading/trailing ws - line = [p.strip() for p in line.split(':')] + line = [p.strip() for p in line.split(":")] data[symbol][line[0]].append(line) return data @@ -386,7 +386,7 @@ class TinyDnsBaseSource(BaseSource): if not records_for: # Something we don't care about self.log.info( - 'skipping type %s, not supported/interested', symbol + "skipping type %s, not supported/interested", symbol ) continue @@ -406,7 +406,7 @@ class TinyDnsBaseSource(BaseSource): def populate(self, zone, target=False, lenient=False): self.log.debug( - 'populate: name=%s, target=%s, lenient=%s', + "populate: name=%s, target=%s, lenient=%s", zone.name, target, lenient, @@ -425,8 +425,8 @@ class TinyDnsBaseSource(BaseSource): # then work through those to group values by their _type and name zone_name = zone.name - arpa = zone_name.endswith('in-addr.arpa.') or zone_name.endswith( - 'ip6.arpa.' 
+ arpa = zone_name.endswith("in-addr.arpa.") or zone_name.endswith( + "ip6.arpa." ) types, ttls = self._process_symbols(zone, symbols, arpa) @@ -436,32 +436,34 @@ class TinyDnsBaseSource(BaseSource): for _type, names in types.items(): for name, values in names.items(): data = { - 'ttl': ttls[_type].get(name, self.default_ttl), - 'type': _type, + "ttl": ttls[_type].get(name, self.default_ttl), + "type": _type, } if len(values) > 1: - data['values'] = _unique(values) + data["values"] = _unique(values) else: - data['value'] = values[0] + data["value"] = values[0] record = Record.new(zone, name, data, lenient=lenient) zone.add_record(record, lenient=lenient) self.log.info( - 'populate: found %s records', len(zone.records) - before + "populate: found %s records", len(zone.records) - before ) class TinyDnsFileSource(TinyDnsBaseSource): - ''' + """ A basic TinyDNS zonefile importer created to import legacy data. - tinydns: - class: octodns.source.tinydns.TinyDnsFileSource - # The location of the TinyDNS zone files - directory: ./zones - # The ttl to use for records when not specified in the data - # (optional, default 3600) - default_ttl: 3600 + .. code-block:: yaml + + tinydns: + class: octodns.source.tinydns.TinyDnsFileSource + # The location of the TinyDNS zone files + directory: ./zones + # The ttl to use for records when not specified in the data + # (optional, default 3600) + default_ttl: 3600 NOTE: timestamps & lo fields are ignored if present. @@ -469,12 +471,12 @@ class TinyDnsFileSource(TinyDnsBaseSource): https://cr.yp.to/djbdns/tinydns-data.html and the common patch/extensions to support IPv6 and a few other record types, https://docs.bytemark.co.uk/article/tinydns-format/. - ''' + """ def __init__(self, id, directory, default_ttl=3600): - self.log = logging.getLogger(f'TinyDnsFileSource[{id}]') + self.log = logging.getLogger(f"TinyDnsFileSource[{id}]") self.log.debug( - '__init__: id=%s, directory=%s, default_ttl=%d', + "__init__: id=%s, directory=%s, default_ttl=%d", id, directory, default_ttl, @@ -489,11 +491,11 @@ class TinyDnsFileSource(TinyDnsBaseSource): # be defined anywhere so we'll just read all files lines = [] for filename in listdir(self.directory): - if filename[0] == '.': + if filename[0] == ".": # Ignore hidden files continue - with open(join(self.directory, filename), 'r') as fh: - lines += [l for l in fh.read().split('\n') if l] + with open(join(self.directory, filename), "r") as fh: + lines += [l for l in fh.read().split("\n") if l] self._cache = lines
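
The `_process_lines` helper touched above drives the whole importer: the leading symbol of each data line selects a handler from `SYMBOL_MAP`, trailing `#` comments are dropped, and the remainder is split on `:` and grouped by fqdn. Below is a minimal, simplified sketch of that grouping step; the sample data lines (fqdns, address, ttls) are purely illustrative, not taken from the repository.

.. code-block:: python

    from collections import defaultdict

    def parse(lines):
        # mirror _process_lines: strip the leading symbol, drop trailing
        # '#' comments, split the rest on ':' and group by symbol then fqdn
        data = defaultdict(lambda: defaultdict(list))
        for line in lines:
            symbol = line[0]
            fields = [p.strip() for p in line[1:].split("#", 1)[0].split(":")]
            data[symbol][fields[0]].append(fields)
        return data

    sample = [
        "+www.example.com:203.0.113.10:300",      # '+' -> A
        "Cblog.example.com:www.example.com:300",  # 'C' -> CNAME
    ]
    parsed = parse(sample)
    # parsed['+']['www.example.com'] == [['www.example.com', '203.0.113.10', '300']]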
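
The `_records_for_three` and `_records_for_caret` handlers rely on tinydns storing IPv6 addresses in full but with the colons removed, so a colon is re-inserted every four hex characters before the value is handed to `ipaddress`. A standalone sketch of that transformation, using only the standard library; the sample address is illustrative.

.. code-block:: python

    import textwrap
    from ipaddress import ip_address

    # as it would appear in a tinydns data file: full width, no colons
    raw = "20010db8000000000000000000000001"
    addr = ip_address(":".join(textwrap.wrap(raw, 4)))
    print(addr)                  # 2001:db8::1
    print(addr.reverse_pointer)  # ...ip6.arpa name used when building PTR records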
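
Finally, a hedged usage sketch for `TinyDnsFileSource`, matching the constructor and `populate` signatures shown in the diff. The zone name and directory are placeholders, and the `Zone(name, sub_zones)` constructor is assumed from octodns' public API rather than shown in this change.

.. code-block:: python

    from octodns.zone import Zone
    from octodns.source.tinydns import TinyDnsFileSource

    # directory of tinydns data files; default_ttl applies when a line has no ttl
    source = TinyDnsFileSource("tinydns", "./zones", default_ttl=3600)

    # assumed Zone(name, sub_zones) constructor; the name must be fully qualified
    zone = Zone("example.com.", [])
    source.populate(zone)

    for record in zone.records:
        print(record.fqdn, record._type)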