Browse Source

update docstring examples

Signed-off-by: Ivan Schaller <ivan@schaller.sh>
pull/1261/head
Ivan Schaller 6 months ago
parent
commit
a960b451a7
12 changed files with 713 additions and 668 deletions
  1. +4
    -0
      .changelog/5f0c3606f74d45879b4326b65476c208.md
  2. +2
    -2
      docs/conf.py
  3. +20
    -22
      octodns/processor/acme.py
  4. +8
    -8
      octodns/processor/base.py
  5. +263
    -241
      octodns/processor/filter.py
  6. +41
    -39
      octodns/processor/meta.py
  7. +28
    -24
      octodns/processor/restrict.py
  8. +35
    -33
      octodns/processor/spf.py
  9. +176
    -169
      octodns/provider/yaml.py
  10. +13
    -13
      octodns/source/base.py
  11. +32
    -28
      octodns/source/envvar.py
  12. +91
    -89
      octodns/source/tinydns.py

+ 4
- 0
.changelog/5f0c3606f74d45879b4326b65476c208.md View File

@ -0,0 +1,4 @@
---
type: none
---
update docstring examples

+ 2
- 2
docs/conf.py View File

@ -1,7 +1,7 @@
import sys import sys
from pathlib import Path from pathlib import Path
sys.path.insert(0, str(Path("..", "src").resolve()))
sys.path.insert(0, str(Path("..").resolve()))
from octodns.__init__ import __version__ from octodns.__init__ import __version__
@ -81,7 +81,7 @@ exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
### theme ### ### theme ###
# tml_theme = "alabaster"
# html_theme = "alabaster"
html_theme = "furo" html_theme = "furo"
html_theme_options = { html_theme_options = {
"source_repository": "https://github.com/octodns/octodns/", "source_repository": "https://github.com/octodns/octodns/",


+ 20
- 22
octodns/processor/acme.py View File

@ -8,36 +8,34 @@ from .base import BaseProcessor
class AcmeManagingProcessor(BaseProcessor): class AcmeManagingProcessor(BaseProcessor):
log = getLogger('AcmeManagingProcessor')
log = getLogger("AcmeManagingProcessor")
def __init__(self, name): def __init__(self, name):
'''
processors:
acme:
class: octodns.processor.acme.AcmeManagingProcessor
...
zones:
something.com.:
...
processors:
- acme
...
'''
"""
.. code-block:: yaml
processors:
acme:
class: octodns.processor.acme.AcmeManagingProcessor
zones:
something.com.:
processors:
- acme2
"""
super().__init__(name) super().__init__(name)
self._owned = set() self._owned = set()
def process_source_zone(self, desired, *args, **kwargs): def process_source_zone(self, desired, *args, **kwargs):
for record in desired.records: for record in desired.records:
if record._type == 'TXT' and record.name.startswith(
'_acme-challenge'
if record._type == "TXT" and record.name.startswith(
"_acme-challenge"
): ):
# We have a managed acme challenge record (owned by octoDNS) so # We have a managed acme challenge record (owned by octoDNS) so
# we should mark it as such # we should mark it as such
record = record.copy() record = record.copy()
record.values.append('*octoDNS*')
record.values.append("*octoDNS*")
record.values.sort() record.values.sort()
# This assumes we'll see things as sources before targets, # This assumes we'll see things as sources before targets,
# which is the case... # which is the case...
@ -50,12 +48,12 @@ class AcmeManagingProcessor(BaseProcessor):
# Uses a startswith rather than == to ignore subdomain challenges, # Uses a startswith rather than == to ignore subdomain challenges,
# e.g. _acme-challenge.foo.domain.com when managing domain.com # e.g. _acme-challenge.foo.domain.com when managing domain.com
if ( if (
record._type == 'TXT'
and record.name.startswith('_acme-challenge')
and '*octoDNS*' not in record.values
record._type == "TXT"
and record.name.startswith("_acme-challenge")
and "*octoDNS*" not in record.values
and record not in self._owned and record not in self._owned
): ):
self.log.info('_process: ignoring %s', record.fqdn)
self.log.info("_process: ignoring %s", record.fqdn)
existing.remove_record(record) existing.remove_record(record)
return existing return existing


+ 8
- 8
octodns/processor/base.py View File

@ -13,7 +13,7 @@ class BaseProcessor(object):
self.id = self.name = name self.id = self.name = name
def process_source_zone(self, desired, sources): def process_source_zone(self, desired, sources):
'''
"""
Called after all sources have completed populate. Provides an Called after all sources have completed populate. Provides an
opportunity for the processor to modify the desired `Zone` that targets opportunity for the processor to modify the desired `Zone` that targets
will receive. will receive.
@ -28,11 +28,11 @@ class BaseProcessor(object):
be used with `replace=True`. be used with `replace=True`.
- May call `Zone.remove_record` to remove records from `desired`. - May call `Zone.remove_record` to remove records from `desired`.
- Sources may be empty, as will be the case for aliased zones. - Sources may be empty, as will be the case for aliased zones.
'''
"""
return desired return desired
def process_target_zone(self, existing, target): def process_target_zone(self, existing, target):
'''
"""
Called after a target has completed `populate`, before changes are Called after a target has completed `populate`, before changes are
computed between `existing` and `desired`. This provides an opportunity computed between `existing` and `desired`. This provides an opportunity
to modify the `existing` `Zone`. to modify the `existing` `Zone`.
@ -45,11 +45,11 @@ class BaseProcessor(object):
the results of which can be modified, and then `Zone.add_record` may the results of which can be modified, and then `Zone.add_record` may
be used with `replace=True`. be used with `replace=True`.
- May call `Zone.remove_record` to remove records from `existing`. - May call `Zone.remove_record` to remove records from `existing`.
'''
"""
return existing return existing
def process_source_and_target_zones(self, desired, existing, target): def process_source_and_target_zones(self, desired, existing, target):
'''
"""
Called just prior to computing changes for `target` between `desired` Called just prior to computing changes for `target` between `desired`
and `existing`. Provides an opportunity for the processor to modify and `existing`. Provides an opportunity for the processor to modify
either the desired or existing `Zone`s that will be used to compute the either the desired or existing `Zone`s that will be used to compute the
@ -72,11 +72,11 @@ class BaseProcessor(object):
be used with `replace=True`. be used with `replace=True`.
- May call `Zone.remove_record` to remove records from `desired`. - May call `Zone.remove_record` to remove records from `desired`.
- May call `Zone.remove_record` to remove records from `existing`. - May call `Zone.remove_record` to remove records from `existing`.
'''
"""
return desired, existing return desired, existing
def process_plan(self, plan, sources, target): def process_plan(self, plan, sources, target):
'''
"""
Called after the planning phase has completed. Provides an opportunity Called after the planning phase has completed. Provides an opportunity
for the processors to modify the plan thus changing the actions that for the processors to modify the plan thus changing the actions that
will be displayed and potentially applied. will be displayed and potentially applied.
@ -90,7 +90,7 @@ class BaseProcessor(object):
`plan.delete_pcent_threshold` when creating a new `Plan`. `plan.delete_pcent_threshold` when creating a new `Plan`.
- Must return a `Plan` which may be `plan` or can be a newly created - Must return a `Plan` which may be `plan` or can be a newly created
one `plan.desired` and `plan.existing` copied over as-is or modified. one `plan.desired` and `plan.existing` copied over as-is or modified.
'''
"""
# plan may be None if no changes were detected up until now, the # plan may be None if no changes were detected up until now, the
# process may still create a plan. # process may still create a plan.
# sources may be empty, as will be the case for aliased zones # sources may be empty, as will be the case for aliased zones


+ 263
- 241
octodns/processor/filter.py View File

@ -57,59 +57,63 @@ class _TypeBaseFilter(_FilterProcessor):
class TypeAllowlistFilter(_TypeBaseFilter, AllowsMixin): class TypeAllowlistFilter(_TypeBaseFilter, AllowsMixin):
'''Only manage records of the specified type(s).
"""Only manage records of the specified type(s).
Example usage: Example usage:
processors:
only-a-and-aaaa:
class: octodns.processor.filter.TypeAllowlistFilter
allowlist:
- A
- AAAA
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- only-a-and-aaaa
targets:
- ns1
'''
only-a-and-aaaa:
class: octodns.processor.filter.TypeAllowlistFilter
allowlist:
- A
- AAAA
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- only-a-and-aaaa
targets:
- ns1
"""
def __init__(self, name, allowlist, **kwargs): def __init__(self, name, allowlist, **kwargs):
super().__init__(name, allowlist, **kwargs) super().__init__(name, allowlist, **kwargs)
class TypeRejectlistFilter(_TypeBaseFilter, RejectsMixin): class TypeRejectlistFilter(_TypeBaseFilter, RejectsMixin):
'''Ignore records of the specified type(s).
"""Ignore records of the specified type(s).
Example usage: Example usage:
processors:
ignore-cnames:
class: octodns.processor.filter.TypeRejectlistFilter
rejectlist:
- CNAME
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- ignore-cnames
targets:
- route53
'''
ignore-cnames:
class: octodns.processor.filter.TypeRejectlistFilter
rejectlist:
- CNAME
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- ignore-cnames
targets:
- route53
"""
def __init__(self, name, rejectlist, **kwargs): def __init__(self, name, rejectlist, **kwargs):
super().__init__(name, rejectlist, **kwargs) super().__init__(name, rejectlist, **kwargs)
@ -121,7 +125,7 @@ class _NameBaseFilter(_FilterProcessor):
exact = set() exact = set()
regex = [] regex = []
for pattern in _list: for pattern in _list:
if pattern.startswith('/'):
if pattern.startswith("/"):
regex.append(re_compile(pattern[1:-1])) regex.append(re_compile(pattern[1:-1]))
else: else:
exact.add(pattern) exact.add(pattern)
@ -144,72 +148,76 @@ class _NameBaseFilter(_FilterProcessor):
class NameAllowlistFilter(_NameBaseFilter, AllowsMixin): class NameAllowlistFilter(_NameBaseFilter, AllowsMixin):
'''Only manage records with names that match the provider patterns
"""Only manage records with names that match the provider patterns
Example usage: Example usage:
processors:
only-these:
class: octodns.processor.filter.NameAllowlistFilter
allowlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- only-these
targets:
- route53
'''
only-these:
class: octodns.processor.filter.NameAllowlistFilter
allowlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- only-these
targets:
- route53
"""
def __init__(self, name, allowlist): def __init__(self, name, allowlist):
super().__init__(name, allowlist) super().__init__(name, allowlist)
class NameRejectlistFilter(_NameBaseFilter, RejectsMixin): class NameRejectlistFilter(_NameBaseFilter, RejectsMixin):
'''Reject managing records with names that match the provider patterns
"""Reject managing records with names that match the provider patterns
Example usage: Example usage:
processors:
not-these:
class: octodns.processor.filter.NameRejectlistFilter
rejectlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- not-these
targets:
- route53
'''
not-these:
class: octodns.processor.filter.NameRejectlistFilter
rejectlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- not-these
targets:
- route53
"""
def __init__(self, name, rejectlist): def __init__(self, name, rejectlist):
super().__init__(name, rejectlist) super().__init__(name, rejectlist)
@ -221,7 +229,7 @@ class _ValueBaseFilter(_FilterProcessor):
exact = set() exact = set()
regex = [] regex = []
for pattern in _list: for pattern in _list:
if pattern.startswith('/'):
if pattern.startswith("/"):
regex.append(re_compile(pattern[1:-1])) regex.append(re_compile(pattern[1:-1]))
else: else:
exact.add(pattern) exact.add(pattern)
@ -231,13 +239,13 @@ class _ValueBaseFilter(_FilterProcessor):
def _process(self, zone, *args, **kwargs): def _process(self, zone, *args, **kwargs):
for record in zone.records: for record in zone.records:
values = [] values = []
if hasattr(record, 'values'):
if hasattr(record, "values"):
values = [value.rdata_text for value in record.values] values = [value.rdata_text for value in record.values]
elif record.value is not None: elif record.value is not None:
values = [record.value.rdata_text] values = [record.value.rdata_text]
else: else:
self.log.warning( self.log.warning(
'value for %s is NoneType, ignoring', record.fqdn
"value for %s is NoneType, ignoring", record.fqdn
) )
if any(value in self.exact for value in values): if any(value in self.exact for value in values):
@ -253,76 +261,80 @@ class _ValueBaseFilter(_FilterProcessor):
class ValueAllowlistFilter(_ValueBaseFilter, AllowsMixin): class ValueAllowlistFilter(_ValueBaseFilter, AllowsMixin):
'''Only manage records with values that match the provider patterns
"""Only manage records with values that match the provider patterns
Example usage: Example usage:
processors:
only-these:
class: octodns.processor.filter.ValueAllowlistFilter
allowlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- only-these
targets:
- route53
'''
only-these:
class: octodns.processor.filter.ValueAllowlistFilter
allowlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- only-these
targets:
- route53
"""
def __init__(self, name, allowlist): def __init__(self, name, allowlist):
self.log = getLogger(f'ValueAllowlistFilter[{name}]')
self.log = getLogger(f"ValueAllowlistFilter[{name}]")
super().__init__(name, allowlist) super().__init__(name, allowlist)
class ValueRejectlistFilter(_ValueBaseFilter, RejectsMixin): class ValueRejectlistFilter(_ValueBaseFilter, RejectsMixin):
'''Reject managing records with names that match the provider patterns
"""Reject managing records with names that match the provider patterns
Example usage: Example usage:
processors:
not-these:
class: octodns.processor.filter.ValueRejectlistFilter
rejectlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- not-these
targets:
- route53
'''
not-these:
class: octodns.processor.filter.ValueRejectlistFilter
rejectlist:
# exact string match
- www
# contains/substring match
- /substring/
# regex pattern match
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- not-these
targets:
- route53
"""
def __init__(self, name, rejectlist): def __init__(self, name, rejectlist):
self.log = getLogger(f'ValueRejectlistFilter[{name}]')
self.log = getLogger(f"ValueRejectlistFilter[{name}]")
super().__init__(name, rejectlist) super().__init__(name, rejectlist)
@ -334,11 +346,11 @@ class _NetworkValueBaseFilter(BaseProcessor):
try: try:
self.networks.append(ip_network(value)) self.networks.append(ip_network(value))
except ValueError: except ValueError:
raise ValueError(f'{value} is not a valid CIDR to use')
raise ValueError(f"{value} is not a valid CIDR to use")
def _process(self, zone, *args, **kwargs): def _process(self, zone, *args, **kwargs):
for record in zone.records: for record in zone.records:
if record._type not in ['A', 'AAAA']:
if record._type not in ["A", "AAAA"]:
continue continue
ips = [ip_address(value) for value in record.values] ips = [ip_address(value) for value in record.values]
@ -356,83 +368,89 @@ class _NetworkValueBaseFilter(BaseProcessor):
class NetworkValueAllowlistFilter(_NetworkValueBaseFilter, AllowsMixin): class NetworkValueAllowlistFilter(_NetworkValueBaseFilter, AllowsMixin):
'''Only manage A and AAAA records with values that match the provider patterns
"""Only manage A and AAAA records with values that match the provider patterns
All other types will be left as-is. All other types will be left as-is.
Example usage: Example usage:
processors:
only-these:
class: octodns.processor.filter.NetworkValueAllowlistFilter
allowlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- only-these
targets:
- route53
'''
only-these:
class: octodns.processor.filter.NetworkValueAllowlistFilter
allowlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
processors:
- only-these
targets:
- route53
"""
def __init__(self, name, allowlist): def __init__(self, name, allowlist):
super().__init__(name, allowlist) super().__init__(name, allowlist)
class NetworkValueRejectlistFilter(_NetworkValueBaseFilter, RejectsMixin): class NetworkValueRejectlistFilter(_NetworkValueBaseFilter, RejectsMixin):
'''Reject managing A and AAAA records with values that match the provider patterns
"""Reject managing A and AAAA records with values that match the provider patterns
All other types will be left as-is. All other types will be left as-is.
Example usage: Example usage:
processors:
not-these:
class: octodns.processor.filter.NetworkValueRejectlistFilter
rejectlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- not-these
targets:
- route53
'''
not-these:
class: octodns.processor.filter.NetworkValueRejectlistFilter
rejectlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
processors:
- not-these
targets:
- route53
"""
def __init__(self, name, rejectlist): def __init__(self, name, rejectlist):
super().__init__(name, rejectlist) super().__init__(name, rejectlist)
class IgnoreRootNsFilter(BaseProcessor): class IgnoreRootNsFilter(BaseProcessor):
'''Do not manage Root NS Records.
"""Do not manage Root NS Records.
Example usage: Example usage:
processors:
no-root-ns:
class: octodns.processor.filter.IgnoreRootNsFilter
.. code-block:: yaml
zones:
exxampled.com.:
sources:
- config
processors: processors:
- no-root-ns
targets:
- ns1
'''
no-root-ns:
class: octodns.processor.filter.IgnoreRootNsFilter
zones:
exxampled.com.:
sources:
- config
processors:
- no-root-ns
targets:
- ns1
"""
def _process(self, zone, *args, **kwargs): def _process(self, zone, *args, **kwargs):
for record in zone.records: for record in zone.records:
if record._type == 'NS' and not record.name:
if record._type == "NS" and not record.name:
zone.remove_record(record) zone.remove_record(record)
return zone return zone
@ -442,31 +460,33 @@ class IgnoreRootNsFilter(BaseProcessor):
class ExcludeRootNsChanges(BaseProcessor): class ExcludeRootNsChanges(BaseProcessor):
'''Do not allow root NS record changes
"""Do not allow root NS record changes
Example usage: Example usage:
processors:
exclude-root-ns-changes:
class: octodns.processor.filter.ExcludeRootNsChanges
# If true an a change for a root NS is seen an error will be thrown. If
# false a warning will be printed and the change will be removed from
# the plan.
# (default: true)
error: true
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- exclude-root-ns-changes
targets:
- ns1
'''
exclude-root-ns-changes:
class: octodns.processor.filter.ExcludeRootNsChanges
# If true and a change for a root NS is seen an error will be thrown. If
# false a warning will be printed and the change will be removed from
# the plan.
# (default: true)
error: true
zones:
exxampled.com.:
sources:
- config
processors:
- exclude-root-ns-changes
targets:
- ns1
"""
def __init__(self, name, error=True): def __init__(self, name, error=True):
self.log = getLogger(f'ExcludeRootNsChanges[{name}]')
self.log = getLogger(f"ExcludeRootNsChanges[{name}]")
super().__init__(name) super().__init__(name)
self.error = error self.error = error
@ -474,14 +494,14 @@ class ExcludeRootNsChanges(BaseProcessor):
if plan: if plan:
for change in list(plan.changes): for change in list(plan.changes):
record = change.record record = change.record
if record._type == 'NS' and record.name == '':
if record._type == "NS" and record.name == "":
self.log.warning( self.log.warning(
'root NS changes are disallowed, fqdn=%s', record.fqdn
"root NS changes are disallowed, fqdn=%s", record.fqdn
) )
if self.error: if self.error:
raise ValidationError( raise ValidationError(
record.fqdn, record.fqdn,
['root NS changes are disallowed'],
["root NS changes are disallowed"],
record.context, record.context,
) )
plan.changes.remove(change) plan.changes.remove(change)
@ -490,30 +510,32 @@ class ExcludeRootNsChanges(BaseProcessor):
class ZoneNameFilter(_FilterProcessor): class ZoneNameFilter(_FilterProcessor):
'''Filter or error on record names that contain the zone name
"""Filter or error on record names that contain the zone name
Example usage: Example usage:
processors:
zone-name:
class: octodns.processor.filter.ZoneNameFilter
# If true a ValidationError will be thrown when such records are
# encountered, if false the records will just be ignored/omitted.
# (default: true)
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- zone-name
targets:
- azure
'''
zone-name:
class: octodns.processor.filter.ZoneNameFilter
# If true a ValidationError will be thrown when such records are
# encountered, if false the records will just be ignored/omitted.
# (default: true)
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
sources:
- config
processors:
- zone-name
targets:
- azure
"""
def __init__(self, name, error=True, **kwargs): def __init__(self, name, error=True, **kwargs):
super().__init__(name, **kwargs) super().__init__(name, **kwargs)
@ -530,7 +552,7 @@ class ZoneNameFilter(_FilterProcessor):
if self.error: if self.error:
raise ValidationError( raise ValidationError(
record.fqdn, record.fqdn,
['record name ends with zone name'],
["record name ends with zone name"],
record.context, record.context,
) )
else: else:


+ 41
- 39
octodns/processor/meta.py View File

@ -20,11 +20,11 @@ except ImportError: # pragma: no cover
def _keys(values): def _keys(values):
return set(v.split('=', 1)[0] for v in values)
return set(v.split("=", 1)[0] for v in values)
class MetaProcessor(BaseProcessor): class MetaProcessor(BaseProcessor):
'''
"""
Add a special metadata record with timestamps, UUIDs, versions, and/or Add a special metadata record with timestamps, UUIDs, versions, and/or
provider name. Will only be updated when there are other changes being made. provider name. Will only be updated when there are other changes being made.
A useful tool to aid in debugging and monitoring of DNS infrastructure. A useful tool to aid in debugging and monitoring of DNS infrastructure.
@ -39,32 +39,34 @@ class MetaProcessor(BaseProcessor):
settings. Values are in the form `key=<value>`, e.g. settings. Values are in the form `key=<value>`, e.g.
`time=2023-09-10T05:49:04.246953` `time=2023-09-10T05:49:04.246953`
processors:
meta:
class: octodns.processor.meta.MetaProcessor
# The name to use for the meta record.
# (default: meta)
record_name: meta
# Include a timestamp with a UTC value indicating the timeframe when the
# last change was made.
# (default: true)
include_time: true
# Include a UUID that can be utilized to uniquely identify the run
# pushing data
# (default: false)
include_uuid: false
# Include the provider id for the target where data is being pushed
# (default: false)
include_provider: false
# Include the octoDNS version being used
# (default: false)
include_version: false
# Extra values to set on the records
# (default: None)
#include_extra:
# key: val
# foo: env/BAR
'''
.. code-block:: yaml
processors:
meta:
class: octodns.processor.meta.MetaProcessor
# The name to use for the meta record.
# (default: meta)
record_name: meta
# Include a timestamp with a UTC value indicating the timeframe when the
# last change was made.
# (default: true)
include_time: true
# Include a UUID that can be utilized to uniquely identify the run
# pushing data
# (default: false)
include_uuid: false
# Include the provider id for the target where data is being pushed
# (default: false)
include_provider: false
# Include the octoDNS version being used
# (default: false)
include_version: false
# Extra values to set on the records
# (default: None)
#include_extra:
# key: val
# foo: env/BAR
"""
@classmethod @classmethod
def get_time(cls): def get_time(cls):
@ -77,7 +79,7 @@ class MetaProcessor(BaseProcessor):
def __init__( def __init__(
self, self,
id, id,
record_name='meta',
record_name="meta",
include_time=True, include_time=True,
include_uuid=False, include_uuid=False,
include_version=False, include_version=False,
@ -85,10 +87,10 @@ class MetaProcessor(BaseProcessor):
include_extra=None, include_extra=None,
ttl=60, ttl=60,
): ):
self.log = getLogger(f'MetaSource[{id}]')
self.log = getLogger(f"MetaSource[{id}]")
super().__init__(id) super().__init__(id)
self.log.info( self.log.info(
'__init__: record_name=%s, include_time=%s, include_uuid=%s, include_version=%s, include_provider=%s, include_extra=%s, ttl=%d',
"__init__: record_name=%s, include_time=%s, include_uuid=%s, include_version=%s, include_provider=%s, include_extra=%s, ttl=%d",
record_name, record_name,
include_time, include_time,
include_uuid, include_uuid,
@ -103,7 +105,7 @@ class MetaProcessor(BaseProcessor):
self.include_version = include_version self.include_version = include_version
self.include_provider = include_provider self.include_provider = include_provider
self.include_extra = ( self.include_extra = (
[f'{key}={val}' for key, val in include_extra.items()]
[f"{key}={val}" for key, val in include_extra.items()]
if include_extra is not None if include_extra is not None
else [] else []
) )
@ -112,13 +114,13 @@ class MetaProcessor(BaseProcessor):
def values(self, target_id): def values(self, target_id):
ret = [] ret = []
if self.include_version: if self.include_version:
ret.append(f'octodns-version={__version__}')
ret.append(f"octodns-version={__version__}")
if self.include_provider: if self.include_provider:
ret.append(f'provider={target_id}')
ret.append(f"provider={target_id}")
if self.time: if self.time:
ret.append(f'time={self.time}')
ret.append(f"time={self.time}")
if self.uuid: if self.uuid:
ret.append(f'uuid={self.uuid}')
ret.append(f"uuid={self.uuid}")
# these were previously converted into key=value or will otherwise be [] # these were previously converted into key=value or will otherwise be []
ret.extend(self.include_extra) ret.extend(self.include_extra)
return ret return ret
@ -127,7 +129,7 @@ class MetaProcessor(BaseProcessor):
meta = Record.new( meta = Record.new(
desired, desired,
self.record_name, self.record_name,
{'ttl': self.ttl, 'type': 'TXT', 'values': self.values(target.id)},
{"ttl": self.ttl, "type": "TXT", "values": self.values(target.id)},
# we may be passing in empty values here to be filled out later in # we may be passing in empty values here to be filled out later in
# process_source_and_target_zones # process_source_and_target_zones
lenient=True, lenient=True,
@ -139,9 +141,9 @@ class MetaProcessor(BaseProcessor):
# always something so we can see if its type and name # always something so we can see if its type and name
record = change.record record = change.record
# existing state, if there is one # existing state, if there is one
existing = getattr(change, 'existing', None)
existing = getattr(change, "existing", None)
return ( return (
record._type == 'TXT'
record._type == "TXT"
and record.name == self.record_name and record.name == self.record_name
and existing is not None and existing is not None
# don't care about the values here, just the fields/keys # don't care about the values here, just the fields/keys


+ 28
- 24
octodns/processor/restrict.py View File

@ -10,7 +10,7 @@ class RestrictionException(ProcessorException):
class TtlRestrictionFilter(BaseProcessor): class TtlRestrictionFilter(BaseProcessor):
'''
"""
Ensure that configured TTLs are between a configured minimum and maximum or Ensure that configured TTLs are between a configured minimum and maximum or
in an allowed set of values. in an allowed set of values.
@ -20,34 +20,38 @@ class TtlRestrictionFilter(BaseProcessor):
Example usage: Example usage:
processors:
min-max-ttl:
class: octodns.processor.restrict.TtlRestrictionFilter
min_ttl: 60
max_ttl: 3600
# allowed_ttls: [300, 900, 3600]
zones:
exxampled.com.:
sources:
- config
.. code-block:: yaml
processors: processors:
- min-max-ttl
targets:
- azure
min-max-ttl:
class: octodns.processor.restrict.TtlRestrictionFilter
min_ttl: 60
max_ttl: 3600
# allowed_ttls: [300, 900, 3600]
zones:
exxampled.com.:
sources:
- config
processors:
- min-max-ttl
targets:
- azure
The restriction can be skipped for specific records by setting the lenient The restriction can be skipped for specific records by setting the lenient
flag, e.g. flag, e.g.
a:
octodns:
lenient: true
ttl: 0
value: 1.2.3.4
.. code-block:: yaml
a:
octodns:
lenient: true
ttl: 0
value: 1.2.3.4
The higher level lenient flags are not checked as it would make more sense The higher level lenient flags are not checked as it would make more sense
to just avoid enabling the processor in those cases. to just avoid enabling the processor in those cases.
'''
"""
SEVEN_DAYS = 60 * 60 * 24 * 7 SEVEN_DAYS = 60 * 60 * 24 * 7
@ -63,14 +67,14 @@ class TtlRestrictionFilter(BaseProcessor):
continue continue
if self.allowed_ttls and record.ttl not in self.allowed_ttls: if self.allowed_ttls and record.ttl not in self.allowed_ttls:
raise RestrictionException( raise RestrictionException(
f'{record.fqdn} ttl={record.ttl} not an allowed value, allowed_ttls={self.allowed_ttls}'
f"{record.fqdn} ttl={record.ttl} not an allowed value, allowed_ttls={self.allowed_ttls}"
) )
elif record.ttl < self.min_ttl: elif record.ttl < self.min_ttl:
raise RestrictionException( raise RestrictionException(
f'{record.fqdn} ttl={record.ttl} too low, min_ttl={self.min_ttl}'
f"{record.fqdn} ttl={record.ttl} too low, min_ttl={self.min_ttl}"
) )
elif record.ttl > self.max_ttl: elif record.ttl > self.max_ttl:
raise RestrictionException( raise RestrictionException(
f'{record.fqdn} ttl={record.ttl} too high, max_ttl={self.max_ttl}'
f"{record.fqdn} ttl={record.ttl} too high, max_ttl={self.max_ttl}"
) )
return zone return zone

+ 35
- 33
octodns/processor/spf.py View File

@ -22,36 +22,38 @@ class SpfDnsLookupException(ProcessorException):
class SpfDnsLookupProcessor(BaseProcessor): class SpfDnsLookupProcessor(BaseProcessor):
'''
"""
Validate that SPF values in TXT records are valid. Validate that SPF values in TXT records are valid.
Example usage: Example usage:
processors:
spf:
class: octodns.processor.spf.SpfDnsLookupProcessor
.. code-block:: yaml
zones:
example.com.:
sources:
- config
processors: processors:
- spf
targets:
- route53
The validation can be skipped for specific records by setting the lenient
flag, e.g.
_spf:
octodns:
lenient: true
ttl: 86400
type: TXT
value: v=spf1 ptr ~all
'''
log = getLogger('SpfDnsLookupProcessor')
spf:
class: octodns.processor.spf.SpfDnsLookupProcessor
zones:
example.com.:
sources:
- config
processors:
- spf
targets:
- route53
The validation can be skipped for specific records by setting the lenient
flag, e.g.
_spf:
octodns:
lenient: true
ttl: 86400
type: TXT
value: v=spf1 ptr ~all
"""
log = getLogger("SpfDnsLookupProcessor")
def __init__(self, name): def __init__(self, name):
self.log.debug(f"SpfDnsLookupProcessor: {name}") self.log.debug(f"SpfDnsLookupProcessor: {name}")
@ -65,7 +67,7 @@ class SpfDnsLookupProcessor(BaseProcessor):
) )
# SPF values to validate will begin with 'v=spf1 ' # SPF values to validate will begin with 'v=spf1 '
spf = [value for value in values if value.startswith('v=spf1 ')]
spf = [value for value in values if value.startswith("v=spf1 ")]
# No SPF values in the TXT record # No SPF values in the TXT record
if len(spf) == 0: if len(spf) == 0:
@ -84,7 +86,7 @@ class SpfDnsLookupProcessor(BaseProcessor):
for value in answer: for value in answer:
text_value = value.to_text() text_value = value.to_text()
processed_value = text_value[1:-1].replace('" "', '')
processed_value = text_value[1:-1].replace('" "', "")
values.append(processed_value) values.append(processed_value)
return values return values
@ -101,7 +103,7 @@ class SpfDnsLookupProcessor(BaseProcessor):
if spf is None: if spf is None:
return lookups return lookups
terms = spf[len('v=spf1 ') :].split(' ')
terms = spf[len("v=spf1 ") :].split(" ")
for term in terms: for term in terms:
if lookups > 10: if lookups > 10:
@ -109,19 +111,19 @@ class SpfDnsLookupProcessor(BaseProcessor):
f"{record.fqdn} exceeds the 10 DNS lookup limit in the SPF record" f"{record.fqdn} exceeds the 10 DNS lookup limit in the SPF record"
) )
if term.startswith('ptr'):
if term.startswith("ptr"):
raise SpfValueException( raise SpfValueException(
f"{record.fqdn} uses the deprecated ptr mechanism" f"{record.fqdn} uses the deprecated ptr mechanism"
) )
# These mechanisms cost one DNS lookup each # These mechanisms cost one DNS lookup each
if term.startswith(('a', 'mx', 'exists:', 'redirect', 'include:')):
if term.startswith(("a", "mx", "exists:", "redirect", "include:")):
lookups += 1 lookups += 1
# The include mechanism can result in further lookups after resolving the DNS record # The include mechanism can result in further lookups after resolving the DNS record
if term.startswith('include:'):
domain = term[len('include:') :]
answer = dns.resolver.resolve(domain, 'TXT')
if term.startswith("include:"):
domain = term[len("include:") :]
answer = dns.resolver.resolve(domain, "TXT")
answer_values = self._process_answer(answer) answer_values = self._process_answer(answer)
lookups = self._check_dns_lookups( lookups = self._check_dns_lookups(
record, answer_values, lookups record, answer_values, lookups
@ -131,7 +133,7 @@ class SpfDnsLookupProcessor(BaseProcessor):
def process_source_zone(self, zone, *args, **kwargs): def process_source_zone(self, zone, *args, **kwargs):
for record in zone.records: for record in zone.records:
if record._type != 'TXT':
if record._type != "TXT":
continue continue
if record.lenient: if record.lenient:


+ 176
- 169
octodns/provider/yaml.py View File

@ -15,69 +15,70 @@ from .base import BaseProvider
class YamlProvider(BaseProvider): class YamlProvider(BaseProvider):
'''
"""
Core provider for records configured in yaml files on disk. Core provider for records configured in yaml files on disk.
config:
class: octodns.provider.yaml.YamlProvider
# The location of yaml config files. By default records are defined in a
# file named for the zone in this directory, the zone file, e.g.
# something.com.yaml.
# (required)
directory: ./config
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
# Whether or not to enforce sorting order when loading yaml
# (optional, default True)
enforce_order: true
# What sort mode to employ when enforcing order
# - simple: `sort`
# - natural: https://pypi.org/project/natsort/
# (optional, default natural)
order_mode: natural
# Whether duplicate records should replace rather than error
# (optional, default False)
populate_should_replace: false
# The file extension used when loading split style zones, Null means
# disabled. When enabled the provider will search for zone records split
# across multiple YAML files in the directory with split_extension
# appended to the zone name, See "Split Details" below.
# split_extension should include the "."
# (optional, default null, "." is the recommended best practice when
# enabling)
split_extension: null
# When writing YAML records out to disk with split_extension enabled
# each record is written out into its own file with .yaml appended to
# the name of the record. The two exceptions are for the root and
# wildcard nodes. These records are written into a file named
# `$[zone.name].yaml`. If you would prefer this catchall file not be
# used `split_catchall` can be set to False to instead write those
# records out to `.yaml` and `*.yaml` respectively. Note that some
# operating systems may not allow files with those names.
# (optional, default True)
split_catchall: true
# Optional filename with record data to be included in all zones
# populated by this provider. Has no effect when used as a target.
# (optional, default null)
shared_filename: null
# Disable loading of the zone .yaml files.
# (optional, default False)
disable_zonefile: false
Note
----
When using this provider as a target any existing comments or formatting
in the zone files will be lost when changes are applyed.
.. code-block:: yaml
config:
class: octodns.provider.yaml.YamlProvider
# The location of yaml config files. By default records are defined in a
# file named for the zone in this directory, the zone file, e.g.
# something.com.yaml.
# (required)
directory: ./config
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
# Whether or not to enforce sorting order when loading yaml
# (optional, default True)
enforce_order: true
# What sort mode to employ when enforcing order
# - simple: `sort`
# - natural: https://pypi.org/project/natsort/
# (optional, default natural)
order_mode: natural
# Whether duplicate records should replace rather than error
# (optional, default False)
populate_should_replace: false
# The file extension used when loading split style zones, Null means
# disabled. When enabled the provider will search for zone records split
# across multiple YAML files in the directory with split_extension
# appended to the zone name, See "Split Details" below.
# split_extension should include the "."
# (optional, default null, "." is the recommended best practice when
# enabling)
split_extension: null
# When writing YAML records out to disk with split_extension enabled
# each record is written out into its own file with .yaml appended to
# the name of the record. The two exceptions are for the root and
# wildcard nodes. These records are written into a file named
# `$[zone.name].yaml`. If you would prefer this catchall file not be
# used `split_catchall` can be set to False to instead write those
# records out to `.yaml` and `*.yaml` respectively. Note that some
# operating systems may not allow files with those names.
# (optional, default True)
split_catchall: true
# Optional filename with record data to be included in all zones
# populated by this provider. Has no effect when used as a target.
# (optional, default null)
shared_filename: null
# Disable loading of the zone .yaml files.
# (optional, default False)
disable_zonefile: false
.. warning::
When using this provider as a target any existing comments or formatting
in the zone files will be lost when changes are applyed.
Split Details Split Details
------------- -------------
@ -91,11 +92,12 @@ class YamlProvider(BaseProvider):
With `split_extension: .` the directory structure for the zone github.com. With `split_extension: .` the directory structure for the zone github.com.
managed under directory "zones/" would look like: managed under directory "zones/" would look like:
zones/
github.com./
$github.com.yaml
www.yaml
...
.. code-block:: yaml
zones/
github.com./
$github.com.yaml
www.yaml
...
Overriding Values Overriding Values
----------------- -----------------
@ -106,68 +108,70 @@ class YamlProvider(BaseProvider):
to external DNS providers and internally, but you want to modify some of to external DNS providers and internally, but you want to modify some of
the records in the internal version. the records in the internal version.
config/octodns.com.yaml
---
other:
type: A
values:
- 192.30.252.115
- 192.30.252.116
www:
type: A
values:
- 192.30.252.113
- 192.30.252.114
internal/octodns.com.yaml
---
'www':
type: A
values:
- 10.0.0.12
- 10.0.0.13
external.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
zones:
octodns.com.:
sources:
- config
targets:
- route53
internal.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
internal:
class: octodns.provider.yaml.YamlProvider
directory: ./internal
populate_should_replace: true
zones:
octodns.com.:
sources:
- config
- internal
targets:
- pdns
`config/octodns.com.yaml`
.. code-block:: yaml
---
other:
type: A
values:
- 192.30.252.115
- 192.30.252.116
www:
type: A
values:
- 192.30.252.113
- 192.30.252.114
`internal/octodns.com.yaml`
.. code-block:: yaml
---
'www':
type: A
values:
- 10.0.0.12
- 10.0.0.13
external.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
zones:
octodns.com.:
sources:
- config
targets:
- route53
internal.yaml
---
providers:
config:
class: octodns.provider.yaml.YamlProvider
directory: ./config
internal:
class: octodns.provider.yaml.YamlProvider
directory: ./internal
populate_should_replace: true
zones:
octodns.com.:
sources:
- config
- internal
targets:
- pdns
You can then sync our records eternally with `--config-file=external.yaml` You can then sync our records eternally with `--config-file=external.yaml`
and internally (with the custom overrides) with and internally (with the custom overrides) with
`--config-file=internal.yaml` `--config-file=internal.yaml`
'''
"""
SUPPORTS_GEO = True SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True SUPPORTS_DYNAMIC = True
@ -177,7 +181,7 @@ class YamlProvider(BaseProvider):
# Any record name added to this set will be included in the catch-all file, # Any record name added to this set will be included in the catch-all file,
# instead of a file matching the record name. # instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
CATCHALL_RECORD_NAMES = ("*", "")
def __init__( def __init__(
self, self,
@ -185,7 +189,7 @@ class YamlProvider(BaseProvider):
directory, directory,
default_ttl=3600, default_ttl=3600,
enforce_order=True, enforce_order=True,
order_mode='natural',
order_mode="natural",
populate_should_replace=False, populate_should_replace=False,
supports_root_ns=True, supports_root_ns=True,
split_extension=False, split_extension=False,
@ -196,9 +200,9 @@ class YamlProvider(BaseProvider):
**kwargs, **kwargs,
): ):
klass = self.__class__.__name__ klass = self.__class__.__name__
self.log = logging.getLogger(f'{klass}[{id}]')
self.log = logging.getLogger(f"{klass}[{id}]")
self.log.debug( self.log.debug(
'__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, order_mode=%s, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, shared_filename=%s, disable_zonefile=%s',
"__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, order_mode=%s, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, shared_filename=%s, disable_zonefile=%s",
id, id,
directory, directory,
default_ttl, default_ttl,
@ -225,8 +229,8 @@ class YamlProvider(BaseProvider):
def copy(self): def copy(self):
kwargs = dict(self.__dict__) kwargs = dict(self.__dict__)
kwargs['id'] = f'{kwargs["id"]}-copy'
del kwargs['log']
kwargs["id"] = f"{kwargs['id']}-copy"
del kwargs["log"]
return YamlProvider(**kwargs) return YamlProvider(**kwargs)
@property @property
@ -250,7 +254,7 @@ class YamlProvider(BaseProvider):
return self.supports_root_ns return self.supports_root_ns
def list_zones(self): def list_zones(self):
self.log.debug('list_zones:')
self.log.debug("list_zones:")
zones = set() zones = set()
extension = self.split_extension extension = self.split_extension
@ -258,7 +262,7 @@ class YamlProvider(BaseProvider):
# we want to leave the . # we want to leave the .
trim = len(extension) - 1 trim = len(extension) - 1
self.log.debug( self.log.debug(
'list_zones: looking for split zones, trim=%d', trim
"list_zones: looking for split zones, trim=%d", trim
) )
for dirname in listdir(self.directory): for dirname in listdir(self.directory):
not_ends_with = not dirname.endswith(extension) not_ends_with = not dirname.endswith(extension)
@ -270,10 +274,10 @@ class YamlProvider(BaseProvider):
zones.add(dirname) zones.add(dirname)
if not self.disable_zonefile: if not self.disable_zonefile:
self.log.debug('list_zones: looking for zone files')
self.log.debug("list_zones: looking for zone files")
for filename in listdir(self.directory): for filename in listdir(self.directory):
not_ends_with = not filename.endswith('.yaml')
too_few_dots = filename.count('.') < 2
not_ends_with = not filename.endswith(".yaml")
too_few_dots = filename.count(".") < 2
not_file = not isfile(join(self.directory, filename)) not_file = not isfile(join(self.directory, filename))
if not_file or not_ends_with or too_few_dots: if not_file or not_ends_with or too_few_dots:
continue continue
@ -284,8 +288,8 @@ class YamlProvider(BaseProvider):
def _split_sources(self, zone): def _split_sources(self, zone):
ext = self.split_extension ext = self.split_extension
utf8 = join(self.directory, f'{zone.decoded_name[:-1]}{ext}')
idna = join(self.directory, f'{zone.name[:-1]}{ext}')
utf8 = join(self.directory, f"{zone.decoded_name[:-1]}{ext}")
idna = join(self.directory, f"{zone.name[:-1]}{ext}")
directory = None directory = None
if isdir(utf8): if isdir(utf8):
if utf8 != idna and isdir(idna): if utf8 != idna and isdir(idna):
@ -299,12 +303,12 @@ class YamlProvider(BaseProvider):
return [] return []
for filename in listdir(directory): for filename in listdir(directory):
if filename.endswith('.yaml'):
if filename.endswith(".yaml"):
yield join(directory, filename) yield join(directory, filename)
def _zone_sources(self, zone): def _zone_sources(self, zone):
utf8 = join(self.directory, f'{zone.decoded_name}yaml')
idna = join(self.directory, f'{zone.name}yaml')
utf8 = join(self.directory, f"{zone.decoded_name}yaml")
idna = join(self.directory, f"{zone.name}yaml")
if isfile(utf8): if isfile(utf8):
if utf8 != idna and isfile(idna): if utf8 != idna and isfile(idna):
raise ProviderException( raise ProviderException(
@ -317,7 +321,7 @@ class YamlProvider(BaseProvider):
return None return None
def _populate_from_file(self, filename, zone, lenient): def _populate_from_file(self, filename, zone, lenient):
with open(filename, 'r') as fh:
with open(filename, "r") as fh:
yaml_data = safe_load( yaml_data = safe_load(
fh, enforce_order=self.enforce_order, order_mode=self.order_mode fh, enforce_order=self.enforce_order, order_mode=self.order_mode
) )
@ -326,8 +330,8 @@ class YamlProvider(BaseProvider):
if not isinstance(data, list): if not isinstance(data, list):
data = [data] data = [data]
for d in data: for d in data:
if 'ttl' not in d:
d['ttl'] = self.default_ttl
if "ttl" not in d:
d["ttl"] = self.default_ttl
record = Record.new( record = Record.new(
zone, name, d, source=self, lenient=lenient zone, name, d, source=self, lenient=lenient
) )
@ -342,7 +346,7 @@ class YamlProvider(BaseProvider):
def populate(self, zone, target=False, lenient=False): def populate(self, zone, target=False, lenient=False):
self.log.debug( self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
"populate: name=%s, target=%s, lenient=%s",
zone.decoded_name, zone.decoded_name,
target, target,
lenient, lenient,
@ -365,7 +369,7 @@ class YamlProvider(BaseProvider):
sources.append(join(self.directory, self.shared_filename)) sources.append(join(self.directory, self.shared_filename))
if not sources and not target: if not sources and not target:
raise ProviderException(f'no YAMLs found for {zone.decoded_name}')
raise ProviderException(f"no YAMLs found for {zone.decoded_name}")
# deterministically order our sources # deterministically order our sources
sources.sort() sources.sort()
@ -375,7 +379,7 @@ class YamlProvider(BaseProvider):
exists = len(sources) > 0 exists = len(sources) > 0
self.log.info( self.log.info(
'populate: found %s records, exists=%s',
"populate: found %s records, exists=%s",
len(zone.records) - before, len(zone.records) - before,
exists, exists,
) )
@ -386,7 +390,7 @@ class YamlProvider(BaseProvider):
copy = plan.existing.copy() copy = plan.existing.copy()
changes = plan.changes changes = plan.changes
self.log.debug( self.log.debug(
'_apply: zone=%s, len(changes)=%d', copy.decoded_name, len(changes)
"_apply: zone=%s, len(changes)=%d", copy.decoded_name, len(changes)
) )
# apply our pending changes to that copy # apply our pending changes to that copy
@ -398,10 +402,10 @@ class YamlProvider(BaseProvider):
data = defaultdict(list) data = defaultdict(list)
for record in records: for record in records:
d = record.data d = record.data
d['type'] = record._type
d["type"] = record._type
if record.ttl == self.default_ttl: if record.ttl == self.default_ttl:
# ttl is the default, we don't need to store it # ttl is the default, we don't need to store it
del d['ttl']
del d["ttl"]
# we want to output the utf-8 version of the name # we want to output the utf-8 version of the name
data[record.decoded_name].append(d) data[record.decoded_name].append(d)
@ -411,18 +415,18 @@ class YamlProvider(BaseProvider):
data[k] = data[k][0] data[k] = data[k][0]
if not isdir(self.directory): if not isdir(self.directory):
self.log.debug('_apply: creating directory=%s', self.directory)
self.log.debug("_apply: creating directory=%s", self.directory)
makedirs(self.directory) makedirs(self.directory)
if self.split_extension: if self.split_extension:
# we're going to do split files # we're going to do split files
decoded_name = copy.decoded_name[:-1] decoded_name = copy.decoded_name[:-1]
directory = join( directory = join(
self.directory, f'{decoded_name}{self.split_extension}'
self.directory, f"{decoded_name}{self.split_extension}"
) )
if not isdir(directory): if not isdir(directory):
self.log.debug('_apply: creating split directory=%s', directory)
self.log.debug("_apply: creating split directory=%s", directory)
makedirs(directory) makedirs(directory)
catchall = {} catchall = {}
@ -430,27 +434,27 @@ class YamlProvider(BaseProvider):
if self.split_catchall and record in self.CATCHALL_RECORD_NAMES: if self.split_catchall and record in self.CATCHALL_RECORD_NAMES:
catchall[record] = config catchall[record] = config
continue continue
filename = join(directory, f'{record}.yaml')
self.log.debug('_apply: writing filename=%s', filename)
filename = join(directory, f"{record}.yaml")
self.log.debug("_apply: writing filename=%s", filename)
with open(filename, 'w') as fh:
with open(filename, "w") as fh:
record_data = {record: config} record_data = {record: config}
safe_dump(record_data, fh, order_mode=self.order_mode) safe_dump(record_data, fh, order_mode=self.order_mode)
if catchall: if catchall:
# Scrub the trailing . to make filenames more sane. # Scrub the trailing . to make filenames more sane.
filename = join(directory, f'${decoded_name}.yaml')
filename = join(directory, f"${decoded_name}.yaml")
self.log.debug( self.log.debug(
'_apply: writing catchall filename=%s', filename
"_apply: writing catchall filename=%s", filename
) )
with open(filename, 'w') as fh:
with open(filename, "w") as fh:
safe_dump(catchall, fh, order_mode=self.order_mode) safe_dump(catchall, fh, order_mode=self.order_mode)
else: else:
# single large file # single large file
filename = join(self.directory, f'{copy.decoded_name}yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
filename = join(self.directory, f"{copy.decoded_name}yaml")
self.log.debug("_apply: writing filename=%s", filename)
with open(filename, "w") as fh:
safe_dump( safe_dump(
dict(data), dict(data),
fh, fh,
@ -460,12 +464,15 @@ class YamlProvider(BaseProvider):
class SplitYamlProvider(YamlProvider): class SplitYamlProvider(YamlProvider):
'''
DEPRECATED: Use YamlProvider with the split_extension parameter instead.
"""
.. deprecated::
DEPRECATED: Use YamlProvider with the split_extension parameter instead.
When migrating the following configuration options would result in the same When migrating the following configuration options would result in the same
behavior as SplitYamlProvider behavior as SplitYamlProvider
.. code-block:: yaml
config: config:
class: octodns.provider.yaml.YamlProvider class: octodns.provider.yaml.YamlProvider
# extension is configured as split_extension # extension is configured as split_extension
@ -474,18 +481,18 @@ class SplitYamlProvider(YamlProvider):
disable_zonefile: true disable_zonefile: true
TO BE REMOVED: 2.0 TO BE REMOVED: 2.0
'''
"""
def __init__(self, id, directory, *args, extension='.', **kwargs):
def __init__(self, id, directory, *args, extension=".", **kwargs):
kwargs.update( kwargs.update(
{ {
'split_extension': extension,
'split_catchall': True,
'disable_zonefile': True,
"split_extension": extension,
"split_catchall": True,
"disable_zonefile": True,
} }
) )
super().__init__(id, directory, *args, **kwargs) super().__init__(id, directory, *args, **kwargs)
deprecated( deprecated(
'SplitYamlProvider is DEPRECATED, use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0',
"SplitYamlProvider is DEPRECATED, use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0",
stacklevel=99, stacklevel=99,
) )

+ 13
- 13
octodns/source/base.py View File

@ -11,17 +11,17 @@ class BaseSource(object):
def __init__(self, id): def __init__(self, id):
self.id = id self.id = id
if not getattr(self, 'log', False):
if not getattr(self, "log", False):
raise NotImplementedError( raise NotImplementedError(
'Abstract base class, log property missing'
"Abstract base class, log property missing"
) )
if not hasattr(self, 'SUPPORTS_GEO'):
if not hasattr(self, "SUPPORTS_GEO"):
raise NotImplementedError( raise NotImplementedError(
'Abstract base class, SUPPORTS_GEO property missing'
"Abstract base class, SUPPORTS_GEO property missing"
) )
if not hasattr(self, 'SUPPORTS'):
if not hasattr(self, "SUPPORTS"):
raise NotImplementedError( raise NotImplementedError(
'Abstract base class, SUPPORTS property missing'
"Abstract base class, SUPPORTS property missing"
) )
@property @property
@ -29,22 +29,22 @@ class BaseSource(object):
return False return False
def populate(self, zone, target=False, lenient=False): def populate(self, zone, target=False, lenient=False):
'''
"""
Loads all records the provider knows about for the provided zone Loads all records the provider knows about for the provided zone
When `target` is True the populate call is being made to load the
When `target` is `True` the populate call is being made to load the
current state of the provider. current state of the provider.
When `lenient` is True the populate call may skip record validation and
When `lenient` is `True` the populate call may skip record validation and
do a "best effort" load of data. That will allow through some common, do a "best effort" load of data. That will allow through some common,
but not best practices stuff that we otherwise would reject. E.g. no but not best practices stuff that we otherwise would reject. E.g. no
trailing . or missing escapes for ;. trailing . or missing escapes for ;.
When target is True (loading current state) this method should return
True if the zone exists or False if it does not.
'''
When target is `True` (loading current state) this method should return
`True` if the zone exists or False if it does not.
"""
raise NotImplementedError( raise NotImplementedError(
'Abstract base class, populate method missing'
"Abstract base class, populate method missing"
) )
def supports(self, record): def supports(self, record):


+ 32
- 28
octodns/source/envvar.py View File

@ -11,11 +11,11 @@ class EnvVarSourceException(Exception):
class EnvironmentVariableNotFoundException(EnvVarSourceException): class EnvironmentVariableNotFoundException(EnvVarSourceException):
def __init__(self, data): def __init__(self, data):
super().__init__(f'Unknown environment variable {data}')
super().__init__(f"Unknown environment variable {data}")
class EnvVarSource(BaseSource): class EnvVarSource(BaseSource):
'''
"""
This source allows for environment variables to be embedded at octodns This source allows for environment variables to be embedded at octodns
execution time into zones. Intended to capture artifacts of deployment to execution time into zones. Intended to capture artifacts of deployment to
facilitate operational objectives. facilitate operational objectives.
@ -31,41 +31,45 @@ class EnvVarSource(BaseSource):
- Capturing identifying information about the deployment process to - Capturing identifying information about the deployment process to
record where and when the zone was updated. record where and when the zone was updated.
version:
class: octodns.source.envvar.EnvVarSource
# The environment variable in question, in this example the username
# currently executing octodns
variable: USER
# The TXT record name to embed the value found at the above
# environment variable
name: deployuser
# The TTL of the TXT record (optional, default 60)
ttl: 3600
.. code-block:: yaml
version:
class: octodns.source.envvar.EnvVarSource
# The environment variable in question, in this example the username
# currently executing octodns
variable: USER
# The TXT record name to embed the value found at the above
# environment variable
name: deployuser
# The TTL of the TXT record (optional, default 60)
ttl: 3600
This source is then combined with other sources in the octodns config This source is then combined with other sources in the octodns config
file: file:
zones:
netflix.com.:
sources:
- yaml
- version
targets:
- ultra
- ns1
'''
.. code-block:: yaml
zones:
netflix.com.:
sources:
- yaml
- version
targets:
- ultra
- ns1
"""
SUPPORTS_GEO = False SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = False SUPPORTS_DYNAMIC = False
SUPPORTS = set(('TXT'))
SUPPORTS = set(("TXT"))
DEFAULT_TTL = 60 DEFAULT_TTL = 60
def __init__(self, id, variable, name, ttl=DEFAULT_TTL): def __init__(self, id, variable, name, ttl=DEFAULT_TTL):
klass = self.__class__.__name__ klass = self.__class__.__name__
self.log = logging.getLogger(f'{klass}[{id}]')
self.log = logging.getLogger(f"{klass}[{id}]")
self.log.debug( self.log.debug(
'__init__: id=%s, variable=%s, name=%s, ttl=%d',
"__init__: id=%s, variable=%s, name=%s, ttl=%d",
id, id,
variable, variable,
name, name,
@ -82,7 +86,7 @@ class EnvVarSource(BaseSource):
raise EnvironmentVariableNotFoundException(self.envvar) raise EnvironmentVariableNotFoundException(self.envvar)
self.log.debug( self.log.debug(
'_read_variable: successfully loaded var=%s val=%s',
"_read_variable: successfully loaded var=%s val=%s",
self.envvar, self.envvar,
value, value,
) )
@ -90,7 +94,7 @@ class EnvVarSource(BaseSource):
def populate(self, zone, target=False, lenient=False): def populate(self, zone, target=False, lenient=False):
self.log.debug( self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
"populate: name=%s, target=%s, lenient=%s",
zone.name, zone.name,
target, target,
lenient, lenient,
@ -102,13 +106,13 @@ class EnvVarSource(BaseSource):
# We don't need to worry about conflicting records here because the # We don't need to worry about conflicting records here because the
# manager will deconflict sources on our behalf. # manager will deconflict sources on our behalf.
payload = {'ttl': self.ttl, 'type': 'TXT', 'values': [value]}
payload = {"ttl": self.ttl, "type": "TXT", "values": [value]}
record = Record.new( record = Record.new(
zone, self.name, payload, source=self, lenient=lenient zone, self.name, payload, source=self, lenient=lenient
) )
zone.add_record(record, lenient=lenient) zone.add_record(record, lenient=lenient)
self.log.info( self.log.info(
'populate: found %s records, exists=False',
"populate: found %s records, exists=False",
len(zone.records) - before, len(zone.records) - before,
) )

+ 91
- 89
octodns/source/tinydns.py View File

@ -55,7 +55,7 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('MX', name):
if not zone.owns("MX", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
@ -65,11 +65,11 @@ class TinyDnsBaseSource(BaseSource):
for line in lines: for line in lines:
mx = line[2] mx = line[2]
# if there's a . in the mx we hit a special case and use it as-is # if there's a . in the mx we hit a special case and use it as-is
if '.' not in mx:
if "." not in mx:
# otherwise we treat it as the MX hostnam and construct the rest # otherwise we treat it as the MX hostnam and construct the rest
mx = f'{mx}.mx.{zone.name}'
elif mx[-1] != '.':
mx = f'{mx}.'
mx = f"{mx}.mx.{zone.name}"
elif mx[-1] != ".":
mx = f"{mx}."
# default distance is 0 # default distance is 0
try: try:
@ -79,12 +79,12 @@ class TinyDnsBaseSource(BaseSource):
# if we have an IP then we need to create an A for the MX # if we have an IP then we need to create an A for the MX
ip = line[1] ip = line[1]
if ip and zone.owns('A', mx):
yield 'A', mx, ttl, [ip]
if ip and zone.owns("A", mx):
yield "A", mx, ttl, [ip]
values.append({'preference': dist, 'exchange': mx})
values.append({"preference": dist, "exchange": mx})
yield 'MX', name, ttl, values
yield "MX", name, ttl, values
def _records_for_C(self, zone, name, lines, arpa=False): def _records_for_C(self, zone, name, lines, arpa=False):
# Cfqdn:p:ttl:timestamp:lo # Cfqdn:p:ttl:timestamp:lo
@ -94,17 +94,17 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('CNAME', name):
if not zone.owns("CNAME", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
value = lines[0][1] value = lines[0][1]
if value[-1] != '.':
value = f'{value}.'
if value[-1] != ".":
value = f"{value}."
ttl = self._ttl_for(lines, 2) ttl = self._ttl_for(lines, 2)
yield 'CNAME', name, ttl, [value]
yield "CNAME", name, ttl, [value]
def _records_for_caret(self, zone, name, lines, arpa=False): def _records_for_caret(self, zone, name, lines, arpa=False):
# ^fqdn:p:ttl:timestamp:lo # ^fqdn:p:ttl:timestamp:lo
@ -116,8 +116,8 @@ class TinyDnsBaseSource(BaseSource):
names = defaultdict(list) names = defaultdict(list)
for line in lines: for line in lines:
if line[0].endswith('in-addr.arpa') or line[0].endswith(
'ip6.arpa.'
if line[0].endswith("in-addr.arpa") or line[0].endswith(
"ip6.arpa."
): ):
# it's a straight PTR record, already in in-addr.arpa format, # it's a straight PTR record, already in in-addr.arpa format,
# 2nd item is the name it points to # 2nd item is the name it points to
@ -128,20 +128,20 @@ class TinyDnsBaseSource(BaseSource):
# we're given # we're given
value = line[0] value = line[0]
addr = line[1] addr = line[1]
if '.' not in addr:
addr = u':'.join(textwrap.wrap(line[1], 4))
if "." not in addr:
addr = ":".join(textwrap.wrap(line[1], 4))
addr = ip_address(addr) addr = ip_address(addr)
name = addr.reverse_pointer name = addr.reverse_pointer
if value[-1] != '.':
value = f'{value}.'
if value[-1] != ".":
value = f"{value}."
names[name].append(value) names[name].append(value)
ttl = self._ttl_for(lines, 2) ttl = self._ttl_for(lines, 2)
for name, values in names.items(): for name, values in names.items():
if zone.owns('PTR', name):
yield 'PTR', name, ttl, values
if zone.owns("PTR", name):
yield "PTR", name, ttl, values
def _records_for_equal(self, zone, name, lines, arpa=False): def _records_for_equal(self, zone, name, lines, arpa=False):
# =fqdn:ip:ttl:timestamp:lo # =fqdn:ip:ttl:timestamp:lo
@ -159,7 +159,7 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('NS', name):
if not zone.owns("NS", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
@ -169,20 +169,20 @@ class TinyDnsBaseSource(BaseSource):
for line in lines: for line in lines:
ns = line[2] ns = line[2]
# if there's a . in the ns we hit a special case and use it as-is # if there's a . in the ns we hit a special case and use it as-is
if '.' not in ns:
if "." not in ns:
# otherwise we treat it as the NS hostnam and construct the rest # otherwise we treat it as the NS hostnam and construct the rest
ns = f'{ns}.ns.{zone.name}'
elif ns[-1] != '.':
ns = f'{ns}.'
ns = f"{ns}.ns.{zone.name}"
elif ns[-1] != ".":
ns = f"{ns}."
# if we have an IP then we need to create an A for the MX # if we have an IP then we need to create an A for the MX
ip = line[1] ip = line[1]
if ip and zone.owns('A', ns):
yield 'A', ns, ttl, [ip]
if ip and zone.owns("A", ns):
yield "A", ns, ttl, [ip]
values.append(ns) values.append(ns)
yield 'NS', name, ttl, values
yield "NS", name, ttl, values
_records_for_amp = _records_for_dot _records_for_amp = _records_for_dot
@ -194,12 +194,12 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('A', name):
if not zone.owns("A", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
# collect our ip(s) # collect our ip(s)
ips = [l[1] for l in lines if l[1] != '0.0.0.0']
ips = [l[1] for l in lines if l[1] != "0.0.0.0"]
if not ips: if not ips:
# we didn't find any value ips so nothing to do # we didn't find any value ips so nothing to do
@ -207,7 +207,7 @@ class TinyDnsBaseSource(BaseSource):
ttl = self._ttl_for(lines, 2) ttl = self._ttl_for(lines, 2)
yield 'A', name, ttl, ips
yield "A", name, ttl, ips
def _records_for_quote(self, zone, name, lines, arpa=False): def _records_for_quote(self, zone, name, lines, arpa=False):
# 'fqdn:s:ttl:timestamp:lo # 'fqdn:s:ttl:timestamp:lo
@ -217,19 +217,19 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('TXT', name):
if not zone.owns("TXT", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
# collect our ip(s) # collect our ip(s)
values = [ values = [
l[1].encode('latin1').decode('unicode-escape').replace(";", "\\;")
l[1].encode("latin1").decode("unicode-escape").replace(";", "\\;")
for l in lines for l in lines
] ]
ttl = self._ttl_for(lines, 2) ttl = self._ttl_for(lines, 2)
yield 'TXT', name, ttl, values
yield "TXT", name, ttl, values
def _records_for_three(self, zone, name, lines, arpa=False): def _records_for_three(self, zone, name, lines, arpa=False):
# 3fqdn:ip:ttl:timestamp:lo # 3fqdn:ip:ttl:timestamp:lo
@ -239,7 +239,7 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('AAAA', name):
if not zone.owns("AAAA", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
@ -249,11 +249,11 @@ class TinyDnsBaseSource(BaseSource):
# TinyDNS files have the ipv6 address written in full, but with the # TinyDNS files have the ipv6 address written in full, but with the
# colons removed. This inserts a colon every 4th character to make # colons removed. This inserts a colon every 4th character to make
# the address correct. # the address correct.
ips.append(u':'.join(textwrap.wrap(line[1], 4)))
ips.append(":".join(textwrap.wrap(line[1], 4)))
ttl = self._ttl_for(lines, 2) ttl = self._ttl_for(lines, 2)
yield 'AAAA', name, ttl, ips
yield "AAAA", name, ttl, ips
def _records_for_S(self, zone, name, lines, arpa=False): def _records_for_S(self, zone, name, lines, arpa=False):
# Sfqdn:ip:x:port:priority:weight:ttl:timestamp:lo # Sfqdn:ip:x:port:priority:weight:ttl:timestamp:lo
@ -263,7 +263,7 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('SRV', name):
if not zone.owns("SRV", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
@ -273,17 +273,17 @@ class TinyDnsBaseSource(BaseSource):
for line in lines: for line in lines:
target = line[2] target = line[2]
# if there's a . in the mx we hit a special case and use it as-is # if there's a . in the mx we hit a special case and use it as-is
if '.' not in target:
if "." not in target:
# otherwise we treat it as the MX hostnam and construct the rest # otherwise we treat it as the MX hostnam and construct the rest
target = f'{target}.srv.{zone.name}'
elif target[-1] != '.':
target = f'{target}.'
target = f"{target}.srv.{zone.name}"
elif target[-1] != ".":
target = f"{target}."
# if we have an IP then we need to create an A for the SRV # if we have an IP then we need to create an A for the SRV
# has to be present, but can be empty # has to be present, but can be empty
ip = line[1] ip = line[1]
if ip and zone.owns('A', target):
yield 'A', target, ttl, [ip]
if ip and zone.owns("A", target):
yield "A", target, ttl, [ip]
# required # required
port = int(line[3]) port = int(line[3])
@ -302,14 +302,14 @@ class TinyDnsBaseSource(BaseSource):
values.append( values.append(
{ {
'priority': priority,
'weight': weight,
'port': port,
'target': target,
"priority": priority,
"weight": weight,
"port": port,
"target": target,
} }
) )
yield 'SRV', name, ttl, values
yield "SRV", name, ttl, values
def _records_for_colon(self, zone, name, lines, arpa=False): def _records_for_colon(self, zone, name, lines, arpa=False):
# :fqdn:n:rdata:ttl:timestamp:lo # :fqdn:n:rdata:ttl:timestamp:lo
@ -319,7 +319,7 @@ class TinyDnsBaseSource(BaseSource):
# no arpa # no arpa
return [] return []
if not zone.owns('SRV', name):
if not zone.owns("SRV", name):
# if name doesn't live under our zone there's nothing for us to do # if name doesn't live under our zone there's nothing for us to do
return return
@ -333,7 +333,7 @@ class TinyDnsBaseSource(BaseSource):
_class = classes.get(_type, None) _class = classes.get(_type, None)
if not _class: if not _class:
self.log.info( self.log.info(
'_records_for_colon: unrecognized type %s, %s', _type, line
"_records_for_colon: unrecognized type %s, %s", _type, line
) )
continue continue
@ -351,18 +351,18 @@ class TinyDnsBaseSource(BaseSource):
yield from self._records_for_three(zone, name, lines, arpa) yield from self._records_for_three(zone, name, lines, arpa)
SYMBOL_MAP = { SYMBOL_MAP = {
'=': _records_for_equal, # A
'^': _records_for_caret, # PTR
'.': _records_for_dot, # NS
'C': _records_for_C, # CNAME
'+': _records_for_plus, # A
'@': _records_for_at, # MX
'&': _records_for_amp, # NS
'\'': _records_for_quote, # TXT
'3': _records_for_three, # AAAA
'S': _records_for_S, # SRV
':': _records_for_colon, # arbitrary
'6': _records_for_six, # AAAA
"=": _records_for_equal, # A
"^": _records_for_caret, # PTR
".": _records_for_dot, # NS
"C": _records_for_C, # CNAME
"+": _records_for_plus, # A
"@": _records_for_at, # MX
"&": _records_for_amp, # NS
"'": _records_for_quote, # TXT
"3": _records_for_three, # AAAA
"S": _records_for_S, # SRV
":": _records_for_colon, # arbitrary
"6": _records_for_six, # AAAA
} }
def _process_lines(self, zone, lines): def _process_lines(self, zone, lines):
@ -371,9 +371,9 @@ class TinyDnsBaseSource(BaseSource):
symbol = line[0] symbol = line[0]
# Skip type, remove trailing comments, and omit newline # Skip type, remove trailing comments, and omit newline
line = line[1:].split('#', 1)[0]
line = line[1:].split("#", 1)[0]
# Split on :'s including :: and strip leading/trailing ws # Split on :'s including :: and strip leading/trailing ws
line = [p.strip() for p in line.split(':')]
line = [p.strip() for p in line.split(":")]
data[symbol][line[0]].append(line) data[symbol][line[0]].append(line)
return data return data
@ -386,7 +386,7 @@ class TinyDnsBaseSource(BaseSource):
if not records_for: if not records_for:
# Something we don't care about # Something we don't care about
self.log.info( self.log.info(
'skipping type %s, not supported/interested', symbol
"skipping type %s, not supported/interested", symbol
) )
continue continue
@ -406,7 +406,7 @@ class TinyDnsBaseSource(BaseSource):
def populate(self, zone, target=False, lenient=False): def populate(self, zone, target=False, lenient=False):
self.log.debug( self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
"populate: name=%s, target=%s, lenient=%s",
zone.name, zone.name,
target, target,
lenient, lenient,
@ -425,8 +425,8 @@ class TinyDnsBaseSource(BaseSource):
# then work through those to group values by their _type and name # then work through those to group values by their _type and name
zone_name = zone.name zone_name = zone.name
arpa = zone_name.endswith('in-addr.arpa.') or zone_name.endswith(
'ip6.arpa.'
arpa = zone_name.endswith("in-addr.arpa.") or zone_name.endswith(
"ip6.arpa."
) )
types, ttls = self._process_symbols(zone, symbols, arpa) types, ttls = self._process_symbols(zone, symbols, arpa)
@ -436,32 +436,34 @@ class TinyDnsBaseSource(BaseSource):
for _type, names in types.items(): for _type, names in types.items():
for name, values in names.items(): for name, values in names.items():
data = { data = {
'ttl': ttls[_type].get(name, self.default_ttl),
'type': _type,
"ttl": ttls[_type].get(name, self.default_ttl),
"type": _type,
} }
if len(values) > 1: if len(values) > 1:
data['values'] = _unique(values)
data["values"] = _unique(values)
else: else:
data['value'] = values[0]
data["value"] = values[0]
record = Record.new(zone, name, data, lenient=lenient) record = Record.new(zone, name, data, lenient=lenient)
zone.add_record(record, lenient=lenient) zone.add_record(record, lenient=lenient)
self.log.info( self.log.info(
'populate: found %s records', len(zone.records) - before
"populate: found %s records", len(zone.records) - before
) )
class TinyDnsFileSource(TinyDnsBaseSource): class TinyDnsFileSource(TinyDnsBaseSource):
'''
"""
A basic TinyDNS zonefile importer created to import legacy data. A basic TinyDNS zonefile importer created to import legacy data.
tinydns:
class: octodns.source.tinydns.TinyDnsFileSource
# The location of the TinyDNS zone files
directory: ./zones
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
.. code-block:: yaml
tinydns:
class: octodns.source.tinydns.TinyDnsFileSource
# The location of the TinyDNS zone files
directory: ./zones
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
NOTE: timestamps & lo fields are ignored if present. NOTE: timestamps & lo fields are ignored if present.
@ -469,12 +471,12 @@ class TinyDnsFileSource(TinyDnsBaseSource):
https://cr.yp.to/djbdns/tinydns-data.html and the common patch/extensions to https://cr.yp.to/djbdns/tinydns-data.html and the common patch/extensions to
support IPv6 and a few other record types, support IPv6 and a few other record types,
https://docs.bytemark.co.uk/article/tinydns-format/. https://docs.bytemark.co.uk/article/tinydns-format/.
'''
"""
def __init__(self, id, directory, default_ttl=3600): def __init__(self, id, directory, default_ttl=3600):
self.log = logging.getLogger(f'TinyDnsFileSource[{id}]')
self.log = logging.getLogger(f"TinyDnsFileSource[{id}]")
self.log.debug( self.log.debug(
'__init__: id=%s, directory=%s, default_ttl=%d',
"__init__: id=%s, directory=%s, default_ttl=%d",
id, id,
directory, directory,
default_ttl, default_ttl,
@ -489,11 +491,11 @@ class TinyDnsFileSource(TinyDnsBaseSource):
# be defined anywhere so we'll just read all files # be defined anywhere so we'll just read all files
lines = [] lines = []
for filename in listdir(self.directory): for filename in listdir(self.directory):
if filename[0] == '.':
if filename[0] == ".":
# Ignore hidden files # Ignore hidden files
continue continue
with open(join(self.directory, filename), 'r') as fh:
lines += [l for l in fh.read().split('\n') if l]
with open(join(self.directory, filename), "r") as fh:
lines += [l for l in fh.read().split("\n") if l]
self._cache = lines self._cache = lines


Loading…
Cancel
Save