Browse Source

Merge remote-tracking branch 'origin' into examples

pull/1053/head
Ross McFarland 2 years ago
parent
commit
74755c632e
No known key found for this signature in database GPG Key ID: 943B179E15D3B22A
69 changed files with 2159 additions and 534 deletions
  1. +4
    -0
      .ci-config.json
  2. +25
    -7
      .github/workflows/main.yml
  3. +22
    -13
      .github/workflows/modules.yml
  4. +1
    -1
      .github/workflows/stale.yml
  5. +76
    -1
      CHANGELOG.md
  6. +3
    -0
      LICENSE
  7. +20
    -0
      README.md
  8. +29
    -26
      docs/dynamic_records.md
  9. +2
    -1
      octodns/__init__.py
  10. +2
    -2
      octodns/cmds/args.py
  11. +37
    -21
      octodns/manager.py
  12. +2
    -1
      octodns/processor/base.py
  13. +266
    -55
      octodns/processor/filter.py
  14. +145
    -0
      octodns/processor/meta.py
  15. +1
    -1
      octodns/processor/restrict.py
  16. +4
    -1
      octodns/processor/spf.py
  17. +228
    -143
      octodns/provider/yaml.py
  18. +27
    -5
      octodns/record/base.py
  19. +3
    -1
      octodns/record/caa.py
  20. +5
    -1
      octodns/record/chunked.py
  21. +98
    -55
      octodns/record/ds.py
  22. +10
    -4
      octodns/record/dynamic.py
  23. +7
    -5
      octodns/record/loc.py
  24. +2
    -1
      octodns/record/mx.py
  25. +5
    -1
      octodns/record/naptr.py
  26. +6
    -0
      octodns/record/spf.py
  27. +2
    -1
      octodns/record/srv.py
  28. +2
    -1
      octodns/record/sshfp.py
  29. +2
    -1
      octodns/record/tlsa.py
  30. +12
    -0
      octodns/yaml.py
  31. +17
    -18
      requirements-dev.txt
  32. +1
    -1
      script/changelog
  33. +2
    -1
      script/cibuild
  34. +1
    -0
      script/cibuild-module
  35. +1
    -0
      script/cibuild-setup-py
  36. +11
    -2
      script/release
  37. +0
    -15
      script/sdist
  38. +1
    -16
      setup.py
  39. +11
    -1
      tests/config/dynamic-config.yaml
  40. +4
    -0
      tests/config/hybrid/one.test.yaml
  41. +4
    -0
      tests/config/hybrid/two.test./$two.test.yaml
  42. +4
    -0
      tests/config/hybrid/two.test./split-zone-file.yaml
  43. +5
    -0
      tests/config/include/array.yaml
  44. +3
    -0
      tests/config/include/dict.yaml
  45. +1
    -0
      tests/config/include/empty.yaml
  46. +2
    -0
      tests/config/include/include-doesnt-exist.yaml
  47. +8
    -0
      tests/config/include/main.yaml
  48. +2
    -0
      tests/config/include/nested.yaml
  49. +2
    -0
      tests/config/include/subdir/value.yaml
  50. +1
    -0
      tests/config/simple-arpa.yaml
  51. +4
    -0
      tests/config/split/shared.yaml
  52. +4
    -0
      tests/config/split/unit.tests.yaml
  53. +21
    -5
      tests/test_octodns_manager.py
  54. +201
    -1
      tests/test_octodns_processor_filter.py
  55. +202
    -0
      tests/test_octodns_processor_meta.py
  56. +214
    -38
      tests/test_octodns_provider_yaml.py
  57. +65
    -1
      tests/test_octodns_record.py
  58. +6
    -0
      tests/test_octodns_record_caa.py
  59. +2
    -0
      tests/test_octodns_record_chunked.py
  60. +140
    -85
      tests/test_octodns_record_ds.py
  61. +43
    -0
      tests/test_octodns_record_dynamic.py
  62. +21
    -1
      tests/test_octodns_record_loc.py
  63. +6
    -0
      tests/test_octodns_record_mx.py
  64. +13
    -0
      tests/test_octodns_record_naptr.py
  65. +11
    -0
      tests/test_octodns_record_srv.py
  66. +6
    -0
      tests/test_octodns_record_sshfp.py
  67. +11
    -0
      tests/test_octodns_record_tlsa.py
  68. +36
    -0
      tests/test_octodns_record_txt.py
  69. +24
    -0
      tests/test_octodns_yaml.py

+ 4
- 0
.ci-config.json View File

@ -0,0 +1,4 @@
{
"python_version_current": "3.12",
"python_versions_active": ["3.8", "3.9", "3.10", "3.11", "3.12"]
}

+ 25
- 7
.github/workflows/main.yml View File

@ -2,17 +2,33 @@ name: OctoDNS
on: [pull_request]
jobs:
config:
runs-on: ubuntu-latest
outputs:
json: ${{ steps.load.outputs.json }}
steps:
- uses: actions/checkout@v4
- id: load
# based on https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
run: |
{
echo 'json<<EOF'
cat ./.ci-config.json
echo EOF
} >> $GITHUB_OUTPUT
ci:
needs: config
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
# Tested versions based on dates in https://devguide.python.org/versions/#versions
python-version: ['3.8', '3.9', '3.10', '3.11']
# Defined in a file that resides in the top level of octodns/octodns,
# based on dates in https://devguide.python.org/versions/#versions
python-version: ${{ fromJson(needs.config.outputs.json).python_versions_active }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Setup python
uses: actions/setup-python@v2
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: x64
@ -35,13 +51,15 @@ jobs:
coverage.xml
htmlcov
setup-py:
needs: config
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
- name: Setup python
uses: actions/setup-python@v2
uses: actions/setup-python@v4
with:
python-version: '3.11'
# Most recent release from https://devguide.python.org/versions/#versions
python-version: ${{ fromJson(needs.config.outputs.json).python_version_current }}
architecture: x64
- name: CI setup.py
run: |


+ 22
- 13
.github/workflows/modules.yml View File

@ -5,7 +5,22 @@ on:
types: [submitted]
jobs:
config:
runs-on: ubuntu-latest
outputs:
json: ${{ steps.load.outputs.json }}
steps:
- uses: actions/checkout@v4
- id: load
# based on https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
run: |
{
echo 'json<<EOF'
cat ./.ci-config.json
echo EOF
} >> $GITHUB_OUTPUT
ci:
needs: config
runs-on: ubuntu-latest
strategy:
fail-fast: false
@ -20,10 +35,11 @@ jobs:
- octodns/octodns-digitalocean
- octodns/octodns-dnsimple
- octodns/octodns-dnsmadeeasy
- octodns/octodns-dyn
- octodns/octodns-easydns
- octodns/octodns-edgecenter
- octodns/octodns-edgedns
- octodns/octodns-etchosts
- octodns/octodns-fastly
- octodns/octodns-gandi
- octodns/octodns-gcore
- octodns/octodns-googlecloud
@ -35,23 +51,16 @@ jobs:
- octodns/octodns-rackspace
- octodns/octodns-route53
- octodns/octodns-selectel
- octodns/octodns-spf
- octodns/octodns-transip
- octodns/octodns-ultra
# has been failing for a while now and afaict not related to octoDNS
# changes commenting out on 2023-07-30, will check on it again at
# some point in the future and either re-enable or delete it.
#- sukiyaki/octodns-netbox
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v4
- name: Setup python
uses: actions/setup-python@v1
uses: actions/setup-python@v4
with:
# This should generally be the latest stable release of python, but
# dyn and ovh don't currently support changes made in 3.10 so we'll
# leave it 3.9 for now. Once 3.11 lands though we'll bump to it and
# if they haven't updated they'll be removed from the matrix
python-version: '3.9'
python-version: ${{ fromJson(needs.config.outputs.json).python_version_current }}
architecture: x64
- name: Test Module
run: |
./script/test-module ${{ matrix.module }}
./script/cibuild-module ${{ matrix.module }}

+ 1
- 1
.github/workflows/stale.yml View File

@ -6,7 +6,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v8
with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 7 days.'
days-before-stale: 90


+ 76
- 1
CHANGELOG.md View File

@ -1,10 +1,82 @@
## v1.1.0 - 2023-??-?? - ???
## v1.?.0 - 2023-??-?? -
* Record.lenient property added similar to other common/standard _octodns data
## v1.3.0 - 2023-11-14 - New and improved processors
#### Noteworthy changes
* Added `octodns.__version__` to replace `octodns.__VERSION__` as the former is
more of a standard, per pep-8. `__VERSION__` is deprecated and will go away
in 2.x
* Fixed issues with handling of chunking large TXT values for providers that use
the in-built `rrs` method
* Removed code that included sha in module version number when installing from
repo as it caused problems with non-binary installs.
#### Stuff
* Added ZoneNameFilter processor to enable ignoring/alerting on typos like
octodns.com.octodns.com
* NetworkValueAllowlistFilter/NetworkValueRejectlistFilter added to
processors.filter to enable filtering A/AAAA records based on value. Can be
useful if you have records with non-routable values in an internal copy of a
zone, but want to exclude them when pushing the same zone publicly (split
horizon)
* ExcludeRootNsChanges processor that will error (or warn) if plan includes a
change to root NS records
* Include the octodns special section info in `Record.__repr__`, makes it easier
to debug things with providers that have special functionality configured
there.
* Most processor.filter processors now support an include_target flag that can
be set to False to leave the target zone data untouched, thus remove any
existing filtered records. Default behavior is unchanged and filtered records
will be completely invisible to octoDNS
## v1.2.1 - 2023-09-29 - Now with fewer stale files
* Update script/release to do clean room dist builds
## v1.2.0 - 2023-09-28 - Bunch more bug fixes
* Record.from_rrs supports `source` parameter
* Record.parse_rdata_text unquotes any quoted (string) values
* Fix crash bug when using the YamlProvider with a directory that contains a
mix of split and non-split zone yamls. See https://github.com/octodns/octodns/issues/1066
* Fix discovery of zones from different sources when there are multiple dynamic
zones. See https://github.com/octodns/octodns/issues/1068
## v1.1.1 - 2023-09-16 - Doh! Fix that one little thing
* Address a bug in the handling of loading auto-arpa manager configuration.
## v1.1.0 - 2023-09-13 - More than enough for a minor release
#### Noteworthy changes
* New dynamic zone config support that allows wildcard entries in the octoDNS
config to be expanded by the source provider(s). See
[Dynamic Zone Config](/README.md#dynamic-zone-config) for more information.
* SplitYamlProvider has been deprecated and will be removed in 2.0. YamlProvider
now includes the ability to process split zones when configured to do so and
allows for more flexibility in how things are laid out than was previously
possible. This includes the ability to split some zones and not others and
even to have partially split zones with some records in the primary zone YAML
and others in a split directory. See YamlProvider documentation for more info.
* YamlProvider now supports a `shared_filename` that can be used to add a set of
common records across all zones using the provider. It can be used stand-alone
or in combination with zone files and/or split configs to aid in DRYing up DNS
* YamlProvider now supports an `!include` directive which enables shared
snippets of config to be reused across many records, e.g. common dynamic rules
across a set of services with service-specific pool values or a unified SFP
value included in TXT records at the root of all zones.
* SpfRecord is formally deprecated with a warning and will become a
ValidationError in 2.x
* SpfDnsLookupProcessor is formally deprecated in favor of the version relocated
into https://github.com/octodns/octodns-spf and will be removed in 2.x
* MetaProcessor added to enable some useful/cool options for debugging/tracking
DNS changes. Specifically timestamps/uuid so you can track whether changes
that have been pushed to providers have propagated/transferred correctly.
#### Stuff
@ -15,6 +87,9 @@
* Add --all option to octodns-validate to enable showing all record validation
errors (as warnings) rather than exiting on the first. Exit code is non-zero
when there are any validation errors.
* New `post_processors` manager configuration parameter to add global processors
that run AFTER zone-specific processors. This should allow more complete
control over when processors are run.
## v1.0.0 - 2023-07-30 - The One


+ 3
- 0
LICENSE View File

@ -1,3 +1,6 @@
MIT License
Copyright (c) 2021 Ross McFarland & the octoDNS Maintainers
Copyright (c) 2017 GitHub, Inc.
Permission is hereby granted, free of charge, to any person


+ 20
- 0
README.md View File

@ -25,6 +25,7 @@ The architecture is pluggable and the tooling is flexible to make it applicable
* [Updating to use extracted providers](#updating-to-use-extracted-providers)
* [Sources](#sources)
* [Notes](#notes)
* [Processors](#processors)
* [Automatic PTR generation](#automatic-ptr-generation)
* [Compatibility and Compliance](#compatibility-and-compliance)
* [`lenient`](#lenient)
@ -293,6 +294,7 @@ The table below lists the providers octoDNS supports. They are maintained in the
| [Rackspace](https://www.rackspace.com/library/what-is-dns) | [octodns_rackspace](https://github.com/octodns/octodns-rackspace/) | |
| [Scaleway](https://www.scaleway.com/en/dns/) | [octodns_scaleway](https://github.com/scaleway/octodns-scaleway) | |
| [Selectel](https://selectel.ru/en/services/additional/dns/) | [octodns_selectel](https://github.com/octodns/octodns-selectel/) | |
| [SPF Value Management](https://github.com/octodns/octodns-spf) | [octodns_spf](https://github.com/octodns/octodns-spf/) | |
| [TransIP](https://www.transip.eu/knowledgebase/entry/155-dns-and-nameservers/) | [octodns_transip](https://github.com/octodns/octodns-transip/) | |
| [UltraDNS](https://vercara.com/authoritative-dns) | [octodns_ultra](https://github.com/octodns/octodns-ultra/) | |
| [YamlProvider](/octodns/provider/yaml.py) | built-in | Supports all record types and core functionality |
@ -322,6 +324,24 @@ Similar to providers, but can only serve to populate records into a zone, cannot
* Dnsimple's uses the configured TTL when serving things through the ALIAS, there's also a secondary TXT record created alongside the ALIAS that octoDNS ignores
* octoDNS itself supports non-ASCII character sets, but in testing Cloudflare is the only provider where that is currently functional end-to-end. Others have failures either in the client libraries or API calls
## Processors
| Processor | Description |
|--|--|
| [AcmeMangingProcessor](/octodns/processor/acme.py) | Useful when processes external to octoDNS are managing acme challenge DNS records, e.g. LetsEncrypt |
| [AutoArpa](/octodns/processor/arpa.py) | See [Automatic PTR generation](#automatic-ptr-generation) below |
| [ExcludeRootNsChanges](/octodns/processor/filter.py) | Filter that errors or warns on planned root/APEX NS records changes. |
| [IgnoreRootNsFilter](/octodns/processor/filter.py) | Filter that IGNORES root/APEX NS records and prevents octoDNS from trying to manage them (where supported.) |
| [MetaProcessor](/octodns/processor/meta.py) | Adds a special meta record with timing, UUID, providers, and/or version to aid in debugging and monitoring. |
| [NameAllowlistFilter](/octodns/processor/filter.py) | Filter that ONLY manages records that match specified naming patterns, all others will be ignored |
| [NameRejectlistFilter](/octodns/processor/filter.py) | Filter that IGNORES records that match specified naming patterns, all others will be managed |
| [OwnershipProcessor](/octodns/processor/ownership.py) | Processor that implements ownership in octoDNS so that it can manage only the records in a zone in sources and will ignore all others. |
| [SpfDnsLookupProcessor](/octodns/processor/spf.py) | Processor that checks SPF values for violations of DNS query limits |
| [TtlRestrictionFilter](/octodns/processor/restrict.py) | Processor that restricts the allowed TTL values to a specified range or list of specific values |
| [TypeAllowlistFilter](/octodns/processor/filter.py) | Filter that ONLY manages records of specified types, all others will be ignored |
| [TypeRejectlistFilter](/octodns/processor/filter.py) | Filter that IGNORES records of specified types, all others will be managed |
| [octodns-spf](https://github.com/octodns/octodns-spf) | SPF Value Management for octoDNS |
## Automatic PTR generation
octoDNS supports automatically generating PTR records from the `A`/`AAAA` records it manages. For more information see the [auto-arpa documentation](/docs/auto_arpa.md).


+ 29
- 26
docs/dynamic_records.md View File

@ -30,7 +30,8 @@ test:
- value: 4.4.4.4
weight: 3
na:
# Implicit fallback to the default pool (below)
# Implicitly goes to the backup pool (below) if all values are failing
# health checks
values:
- value: 5.5.5.5
- value: 6.6.6.6
@ -59,11 +60,13 @@ test:
- pool: na
ttl: 60
type: A
# These values become a non-healthchecked default pool, generally it should be
# a superset of the catch-all pool and include enough capacity to try and
# serve all global requests (with degraded performance.) The main case they
# will come into play is if all dynamic healthchecks are failing, either on
# the service side or if the providers systems are expeiencing problems.
# These values become a non-healthchecked backup/default pool, generally it
# should be a superset of the catch-all pool and include enough capacity to
# try and serve all global requests (with degraded performance.) The main
# case they will come into play is if all dynamic healthchecks are failing,
# either on the service side or if the providers systems are experiencing
# problems. They will also be used for when the record is pushed to a
# provider that doesn't support dynamic records.
values:
- 3.3.3.3
- 4.4.4.4
@ -81,26 +84,26 @@ If you encounter validation errors in dynamic records suggesting best practices
title: Visual Representation of the Rules and Pools
---
flowchart LR
query((Query)) --> rule_0[Rule 0<br>AF-ZA<br>AS<br>OC]
rule_0 --no match--> rule_1[Rule 1<br>AF<br>EU]
rule_1 --no match--> rule_2["Rule 2<br>(catch all)"]
rule_0 --match--> pool_apac[Pool apac<br>1.1.1.1<br>2.2.2.2]
pool_apac --fallback--> pool_na
rule_1 --match--> pool_eu["Pool eu<br>3.3.3.3 (2/5)<br>4.4.4.4 (3/5)"]
pool_eu --fallback--> pool_na
rule_2 --> pool_na[Pool na<br>5.5.5.5<br>6.6.6.6<br>7.7.7.7]
pool_na --fallback--> values[values<br>3.3.3.3<br>4.4.4.4<br>5.5.5.5<br>6.6.6.6<br>7.7.7.7]
classDef queryColor fill:#3B67A8,color:#ffffff
classDef ruleColor fill:#D8F57A,color:#000000
classDef poolColor fill:#F57261,color:#000000
classDef valueColor fill:#498FF5,color:#000000
class query queryColor
class rule_0,rule_1,rule_2 ruleColor
class pool_apac,pool_eu,pool_na poolColor
class values valueColor
query((Query)) --> rule_0[Rule 0<br>AF-ZA<br>AS<br>OC]
rule_0 --no match--> rule_1[Rule 1<br>AF<br>EU]
rule_1 --no match--> rule_2["Rule 2<br>(catch all)"]
rule_0 --match--> pool_apac[Pool apac<br>1.1.1.1<br>2.2.2.2]
pool_apac --fallback--> pool_na
rule_1 --match--> pool_eu["Pool eu<br>3.3.3.3 (2/5)<br>4.4.4.4 (3/5)"]
pool_eu --fallback--> pool_na
rule_2 --> pool_na[Pool na<br>5.5.5.5<br>6.6.6.6<br>7.7.7.7]
pool_na --backup--> values[values<br>3.3.3.3<br>4.4.4.4<br>5.5.5.5<br>6.6.6.6<br>7.7.7.7]
classDef queryColor fill:#3B67A8,color:#ffffff
classDef ruleColor fill:#D8F57A,color:#000000
classDef poolColor fill:#F57261,color:#000000
classDef valueColor fill:#498FF5,color:#000000
class query queryColor
class rule_0,rule_1,rule_2 ruleColor
class pool_apac,pool_eu,pool_na poolColor
class values valueColor
```


+ 2
- 1
octodns/__init__.py View File

@ -1,3 +1,4 @@
'OctoDNS: DNS as code - Tools for managing DNS across multiple providers'
__VERSION__ = '1.0.0'
# TODO: remove __VERSION__ w/2.x
__version__ = __VERSION__ = '1.3.0'

+ 2
- 2
octodns/cmds/args.py View File

@ -10,7 +10,7 @@ from sys import stderr, stdout
from yaml import safe_load
from octodns import __VERSION__
from octodns import __version__
class ArgumentParser(_Base):
@ -24,7 +24,7 @@ class ArgumentParser(_Base):
super().__init__(*args, **kwargs)
def parse_args(self, default_log_level=INFO):
version = f'octoDNS {__VERSION__}'
version = f'octoDNS {__version__}'
self.add_argument(
'--version',
action='version',


+ 37
- 21
octodns/manager.py View File

@ -11,13 +11,13 @@ from logging import getLogger
from os import environ
from sys import stdout
from . import __VERSION__
from . import __version__
from .idna import IdnaDict, idna_decode, idna_encode
from .processor.arpa import AutoArpa
from .processor.meta import MetaProcessor
from .provider.base import BaseProvider
from .provider.plan import Plan
from .provider.yaml import SplitYamlProvider, YamlProvider
from .record import Record
from .yaml import safe_load
from .zone import Zone
@ -89,7 +89,7 @@ class Manager(object):
def __init__(
self, config_file, max_workers=None, include_meta=False, auto_arpa=False
):
version = self._try_version('octodns', version=__VERSION__)
version = self._try_version('octodns', version=__version__)
self.log.info(
'__init__: config_file=%s, (octoDNS %s)', config_file, version
)
@ -114,6 +114,11 @@ class Manager(object):
self.global_processors = manager_config.get('processors', [])
self.log.info('__init__: global_processors=%s', self.global_processors)
self.global_post_processors = manager_config.get('post_processors', [])
self.log.info(
'__init__: global_post_processors=%s', self.global_post_processors
)
providers_config = self.config['providers']
self.providers = self._config_providers(providers_config)
@ -122,13 +127,28 @@ class Manager(object):
if self.auto_arpa:
self.log.info(
'__init__: adding auto-arpa to processors and providers, appending it to global_processors list'
'__init__: adding auto-arpa to processors and providers, prepending it to global_post_processors list'
)
kwargs = self.auto_arpa if isinstance(auto_arpa, dict) else {}
kwargs = self.auto_arpa if isinstance(self.auto_arpa, dict) else {}
auto_arpa = AutoArpa('auto-arpa', **kwargs)
self.providers[auto_arpa.name] = auto_arpa
self.processors[auto_arpa.name] = auto_arpa
self.global_processors.append(auto_arpa.name)
self.global_post_processors = [
auto_arpa.name
] + self.global_post_processors
if self.include_meta:
self.log.info(
'__init__: adding meta to processors and providers, appending it to global_post_processors list'
)
meta = MetaProcessor(
'meta',
record_name='octodns-meta',
include_time=False,
include_provider=True,
)
self.processors[meta.id] = meta
self.global_post_processors.append(meta.id)
plan_outputs_config = manager_config.get(
'plan_outputs',
@ -288,7 +308,10 @@ class Manager(object):
# finally try and import the module and see if it has a __VERSION__
if module is None:
module = import_module(module_name)
return getattr(module, '__VERSION__', None)
# TODO: remove the __VERSION__ fallback eventually?
return getattr(
module, '__version__', getattr(module, '__VERSION__', None)
)
def _import_module(self, module_name):
current = module_name
@ -433,17 +456,6 @@ class Manager(object):
plans = []
for target in targets:
if self.include_meta:
meta = Record.new(
zone,
'octodns-meta',
{
'type': 'TXT',
'ttl': 60,
'value': f'provider={target.id}',
},
)
zone.add_record(meta, replace=True)
try:
plan = target.plan(zone, processors=processors)
except TypeError as e:
@ -507,11 +519,11 @@ class Manager(object):
# we've found a dynamic config element
# find its sources
sources = sources or self._get_sources(
found_sources = sources or self._get_sources(
name, config, eligible_sources
)
self.log.info('sync: dynamic zone=%s, sources=%s', name, sources)
for source in sources:
for source in found_sources:
if not hasattr(source, 'list_zones'):
raise ManagerException(
f'dynamic zone={name} includes a source, {source.id}, that does not support `list_zones`'
@ -634,7 +646,11 @@ class Manager(object):
try:
collected = []
for processor in self.global_processors + processors:
for processor in (
self.global_processors
+ processors
+ self.global_post_processors
):
collected.append(self.processors[processor])
processors = collected
except KeyError:


+ 2
- 1
octodns/processor/base.py View File

@ -9,7 +9,8 @@ class ProcessorException(Exception):
class BaseProcessor(object):
def __init__(self, name):
self.name = name
# TODO: name is DEPRECATED, remove in 2.0
self.id = self.name = name
def process_source_zone(self, desired, sources):
'''


+ 266
- 55
octodns/processor/filter.py View File

@ -2,12 +2,61 @@
#
#
from ipaddress import ip_address, ip_network
from itertools import product
from logging import getLogger
from re import compile as re_compile
from ..record.exception import ValidationError
from .base import BaseProcessor
class TypeAllowlistFilter(BaseProcessor):
class _FilterProcessor(BaseProcessor):
def __init__(self, name, include_target=True, **kwargs):
super().__init__(name, **kwargs)
self.include_target = include_target
def process_source_zone(self, *args, **kwargs):
return self._process(*args, **kwargs)
def process_target_zone(self, existing, *args, **kwargs):
if self.include_target:
return self._process(existing, *args, **kwargs)
return existing
class AllowsMixin:
def matches(self, zone, record):
pass
def doesnt_match(self, zone, record):
zone.remove_record(record)
class RejectsMixin:
def matches(self, zone, record):
zone.remove_record(record)
def doesnt_match(self, zone, record):
pass
class _TypeBaseFilter(_FilterProcessor):
def __init__(self, name, _list, **kwargs):
super().__init__(name, **kwargs)
self._list = set(_list)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
if record._type in self._list:
self.matches(zone, record)
else:
self.doesnt_match(zone, record)
return zone
class TypeAllowlistFilter(_TypeBaseFilter, AllowsMixin):
'''Only manage records of the specified type(s).
Example usage:
@ -18,6 +67,10 @@ class TypeAllowlistFilter(BaseProcessor):
allowlist:
- A
- AAAA
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
@ -29,22 +82,11 @@ class TypeAllowlistFilter(BaseProcessor):
- ns1
'''
def __init__(self, name, allowlist):
super().__init__(name)
self.allowlist = set(allowlist)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
if record._type not in self.allowlist:
zone.remove_record(record)
return zone
def __init__(self, name, allowlist, **kwargs):
super().__init__(name, allowlist, **kwargs)
process_source_zone = _process
process_target_zone = _process
class TypeRejectlistFilter(BaseProcessor):
class TypeRejectlistFilter(_TypeBaseFilter, RejectsMixin):
'''Ignore records of the specified type(s).
Example usage:
@ -54,6 +96,10 @@ class TypeRejectlistFilter(BaseProcessor):
class: octodns.processor.filter.TypeRejectlistFilter
rejectlist:
- CNAME
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
@ -65,24 +111,13 @@ class TypeRejectlistFilter(BaseProcessor):
- route53
'''
def __init__(self, name, rejectlist):
super().__init__(name)
self.rejectlist = set(rejectlist)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
if record._type in self.rejectlist:
zone.remove_record(record)
return zone
process_source_zone = _process
process_target_zone = _process
def __init__(self, name, rejectlist, **kwargs):
super().__init__(name, rejectlist, **kwargs)
class _NameBaseFilter(BaseProcessor):
def __init__(self, name, _list):
super().__init__(name)
class _NameBaseFilter(_FilterProcessor):
def __init__(self, name, _list, **kwargs):
super().__init__(name, **kwargs)
exact = set()
regex = []
for pattern in _list:
@ -93,8 +128,22 @@ class _NameBaseFilter(BaseProcessor):
self.exact = exact
self.regex = regex
def _process(self, zone, *args, **kwargs):
for record in zone.records:
name = record.name
if name in self.exact:
self.matches(zone, record)
continue
elif any(r.search(name) for r in self.regex):
self.matches(zone, record)
continue
self.doesnt_match(zone, record)
class NameAllowlistFilter(_NameBaseFilter):
return zone
class NameAllowlistFilter(_NameBaseFilter, AllowsMixin):
'''Only manage records with names that match the provider patterns
Example usage:
@ -111,6 +160,10 @@ class NameAllowlistFilter(_NameBaseFilter):
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
@ -125,23 +178,8 @@ class NameAllowlistFilter(_NameBaseFilter):
def __init__(self, name, allowlist):
super().__init__(name, allowlist)
def _process(self, zone, *args, **kwargs):
for record in zone.records:
name = record.name
if name in self.exact:
continue
elif any(r.search(name) for r in self.regex):
continue
zone.remove_record(record)
return zone
process_source_zone = _process
process_target_zone = _process
class NameRejectlistFilter(_NameBaseFilter):
class NameRejectlistFilter(_NameBaseFilter, RejectsMixin):
'''Reject managing records with names that match the provider patterns
Example usage:
@ -158,6 +196,10 @@ class NameRejectlistFilter(_NameBaseFilter):
- /some-pattern-\\d\\+/
# regex - anchored so has to match start to end
- /^start-.+-end$/
# Optional param that can be set to False to leave the target zone
# alone, thus allowing deletion of existing records
# (default: true)
# include_target: True
zones:
exxampled.com.:
@ -172,17 +214,29 @@ class NameRejectlistFilter(_NameBaseFilter):
def __init__(self, name, rejectlist):
super().__init__(name, rejectlist)
class _NetworkValueBaseFilter(BaseProcessor):
def __init__(self, name, _list):
super().__init__(name)
self.networks = []
for value in _list:
try:
self.networks.append(ip_network(value))
except ValueError:
raise ValueError(f'{value} is not a valid CIDR to use')
def _process(self, zone, *args, **kwargs):
for record in zone.records:
name = record.name
if name in self.exact:
zone.remove_record(record)
if record._type not in ['A', 'AAAA']:
continue
for regex in self.regex:
if regex.search(name):
zone.remove_record(record)
break
ips = [ip_address(value) for value in record.values]
if any(
ip in network for ip, network in product(ips, self.networks)
):
self.matches(zone, record)
else:
self.doesnt_match(zone, record)
return zone
@ -190,6 +244,62 @@ class NameRejectlistFilter(_NameBaseFilter):
process_target_zone = _process
class NetworkValueAllowlistFilter(_NetworkValueBaseFilter, AllowsMixin):
'''Only manage A and AAAA records with values that match the provider patterns
All other types will be left as-is.
Example usage:
processors:
only-these:
class: octodns.processor.filter.NetworkValueAllowlistFilter
allowlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
processors:
- only-these
targets:
- route53
'''
def __init__(self, name, allowlist):
super().__init__(name, allowlist)
class NetworkValueRejectlistFilter(_NetworkValueBaseFilter, RejectsMixin):
'''Reject managing A and AAAA records with values that match the provided patterns
All other types will be left as-is.
Example usage:
processors:
not-these:
class: octodns.processor.filter.NetworkValueRejectlistFilter
rejectlist:
- 127.0.0.1/32
- 192.168.0.0/16
- fd00::/8
zones:
exxampled.com.:
sources:
- config
processors:
- not-these
targets:
- route53
'''
def __init__(self, name, rejectlist):
super().__init__(name, rejectlist)
class IgnoreRootNsFilter(BaseProcessor):
'''Do not manage Root NS Records.
@ -218,3 +328,104 @@ class IgnoreRootNsFilter(BaseProcessor):
process_source_zone = _process
process_target_zone = _process
class ExcludeRootNsChanges(BaseProcessor):
'''Do not allow root NS record changes
Example usage:
processors:
exclude-root-ns-changes:
class: octodns.processor.filter.ExcludeRootNsChanges
# If true and a change for a root NS is seen, an error will be thrown. If
# false a warning will be printed and the change will be removed from
# the plan.
# (default: true)
error: true
zones:
exxampled.com.:
sources:
- config
processors:
- exclude-root-ns-changes
targets:
- ns1
'''
def __init__(self, name, error=True):
self.log = getLogger(f'ExcludeRootNsChanges[{name}]')
super().__init__(name)
self.error = error
def process_plan(self, plan, sources, target):
if plan:
for change in list(plan.changes):
record = change.record
if record._type == 'NS' and record.name == '':
self.log.warning(
'root NS changes are disallowed, fqdn=%s', record.fqdn
)
if self.error:
raise ValidationError(
record.fqdn,
['root NS changes are disallowed'],
record.context,
)
plan.changes.remove(change)
print(len(plan.changes))
return plan
class ZoneNameFilter(_FilterProcessor):
    '''Filter or error on record names that contain the zone name

    Example usage:

    processors:
      zone-name:
        class: octodns.processor.filter.ZoneNameFilter
        # If true a ValidationError will be thrown when such records are
        # encountered, if false the records will just be ignored/omitted.
        # (default: true)
        # Optional param that can be set to False to leave the target zone
        # alone, thus allowing deletion of existing records
        # (default: true)
        # include_target: True
    zones:
      exxampled.com.:
        sources:
          - config
        processors:
          - zone-name
        targets:
          - azure
    '''

    def __init__(self, name, error=True, **kwargs):
        super().__init__(name, **kwargs)
        # when True offending records raise, when False they're dropped
        self.error = error

    def _process(self, zone, *args, **kwargs):
        # a record name like "foo.example.com" or "foo.example.com." inside
        # zone example.com. is almost always a config mistake
        suffixes = (zone.name, zone.name[:-1])
        for record in zone.records:
            if not record.name.endswith(suffixes):
                continue
            if self.error:
                raise ValidationError(
                    record.fqdn,
                    ['record name ends with zone name'],
                    record.context,
                )
            # just remove it
            zone.remove_record(record)
        return zone

+ 145
- 0
octodns/processor/meta.py View File

@ -0,0 +1,145 @@
#
#
#
from datetime import datetime
from logging import getLogger
from uuid import uuid4
from .. import __version__
from ..record import Record
from .base import BaseProcessor
def _keys(values):
return set(v.split('=', 1)[0] for v in values)
class MetaProcessor(BaseProcessor):
    '''
    Add a special metadata record with timestamps, UUIDs, versions, and/or
    provider name. Will only be updated when there are other changes being
    made. A useful tool to aid in debugging and monitoring of DNS
    infrastructure.

    Timestamps or UUIDs can be useful in checking whether changes are
    propagating, either from a provider's backend to their servers or via
    AXFRs. Provider can be utilized to determine which DNS system responded
    to a query when things are operating in dual authority or split horizon
    setups.

    Creates a TXT record with the name configured with values based on
    processor settings. Values are in the form `key=<value>`, e.g.
    `time=2023-09-10T05:49:04.246953`

    processors:
      meta:
        class: octodns.processor.meta.MetaProcessor
        # The name to use for the meta record.
        # (default: meta)
        record_name: meta
        # Include a timestamp with a UTC value indicating the timeframe when
        # the last change was made.
        # (default: true)
        include_time: true
        # Include a UUID that can be utilized to uniquely identify the run
        # pushing data
        # (default: false)
        include_uuid: false
        # Include the provider id for the target where data is being pushed
        # (default: false)
        include_provider: false
        # Include the octoDNS version being used
        # (default: false)
        include_version: false
    '''

    @classmethod
    def now(cls):
        # naive UTC timestamp, e.g. 2023-09-10T05:49:04.246953
        return datetime.utcnow().isoformat()

    @classmethod
    def uuid(cls):
        # random UUID4 rendered as a string
        return str(uuid4())

    def __init__(
        self,
        id,
        record_name='meta',
        include_time=True,
        include_uuid=False,
        include_version=False,
        include_provider=False,
        ttl=60,
    ):
        # fix: logger was previously named MetaSource; this is a processor
        self.log = getLogger(f'MetaProcessor[{id}]')
        super().__init__(id)
        self.log.info(
            '__init__: record_name=%s, include_time=%s, include_uuid=%s, include_version=%s, include_provider=%s, ttl=%d',
            record_name,
            include_time,
            include_uuid,
            include_version,
            include_provider,
            ttl,
        )
        self.record_name = record_name
        values = []
        if include_time:
            time = self.now()
            values.append(f'time={time}')
        if include_uuid:
            # fix: removed redundant `if include_uuid else None` conditional,
            # we're already inside the include_uuid branch
            values.append(f'uuid={self.uuid()}')
        if include_version:
            values.append(f'octodns-version={__version__}')
        # the provider id isn't known until process_target_zone runs
        self.include_provider = include_provider
        values.sort()
        self.values = values
        self.ttl = ttl

    def process_source_zone(self, desired, sources):
        '''Add the meta TXT record to the desired zone.'''
        meta = Record.new(
            desired,
            self.record_name,
            {'ttl': self.ttl, 'type': 'TXT', 'values': self.values},
            # we may be passing in empty values here to be filled out later in
            # process_target_zone
            lenient=True,
        )
        desired.add_record(meta)
        return desired

    def process_target_zone(self, existing, target):
        '''Append provider=<id> to the meta record when include_provider is set.'''
        if self.include_provider:
            # look for the meta record
            for record in sorted(existing.records):
                if record.name == self.record_name and record._type == 'TXT':
                    # we've found it, make a copy we can modify
                    record = record.copy()
                    record.values = record.values + [f'provider={target.id}']
                    record.values.sort()
                    existing.add_record(record, replace=True)
                    break
        return existing

    def _up_to_date(self, change):
        # the meta record is "up to date" when the existing record carries the
        # same set of keys; values (e.g. time) are expected to differ each run
        existing = getattr(change, 'existing', None)
        return existing is not None and _keys(existing.values) == _keys(
            self.values
        )

    def process_plan(self, plan, sources, target):
        '''Suppress the plan when the only change is a no-op meta update.'''
        if (
            plan
            and len(plan.changes) == 1
            and self._up_to_date(plan.changes[0])
        ):
            # the only change is the meta record, and it's not meaningfully
            # changing so we don't actually want to make the change
            return None
        # There's more than one thing changing so meta should update and/or
        # meta is meaningfully changing or being created...
        return plan

+ 1
- 1
octodns/processor/restrict.py View File

@ -59,7 +59,7 @@ class TtlRestrictionFilter(BaseProcessor):
def process_source_zone(self, zone, *args, **kwargs):
for record in zone.records:
if record._octodns.get('lenient'):
if record.lenient:
continue
if self.allowed_ttls and record.ttl not in self.allowed_ttls:
raise RestrictionException(


+ 4
- 1
octodns/processor/spf.py View File

@ -55,6 +55,9 @@ class SpfDnsLookupProcessor(BaseProcessor):
def __init__(self, name):
self.log.debug(f"SpfDnsLookupProcessor: {name}")
self.log.warning(
'SpfDnsLookupProcessor is DEPRECATED in favor of the version relocated into octodns-spf and will be removed in 2.0'
)
super().__init__(name)
def _get_spf_from_txt_values(
@ -134,7 +137,7 @@ class SpfDnsLookupProcessor(BaseProcessor):
if record._type != 'TXT':
continue
if record._octodns.get('lenient'):
if record.lenient:
continue
self._check_dns_lookups(record, record.values, 0)


+ 228
- 143
octodns/provider/yaml.py View File

@ -19,18 +19,75 @@ class YamlProvider(BaseProvider):
config:
class: octodns.provider.yaml.YamlProvider
# The location of yaml config files (required)
# The location of yaml config files. By default records are defined in a
# file named for the zone in this directory, the zone file, e.g.
# something.com.yaml.
# (required)
directory: ./config
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
# Whether or not to enforce sorting order on the yaml config
# Whether or not to enforce sorting order when loading yaml
# (optional, default True)
enforce_order: true
# Whether duplicate records should replace rather than error
# (optiona, default False)
# (optional, default False)
populate_should_replace: false
# The file extension used when loading split style zones, Null means
# disabled. When enabled the provider will search for zone records split
# across multiple YAML files in the directory with split_extension
# appended to the zone name, See "Split Details" below.
# split_extension should include the "."
# (optional, default null, "." is the recommended best practice when
# enabling)
split_extension: null
# When writing YAML records out to disk with split_extension enabled
# each record is written out into its own file with .yaml appended to
# the name of the record. The two exceptions are for the root and
# wildcard nodes. These records are written into a file named
# `$[zone.name].yaml`. If you would prefer this catchall file not be
# used `split_catchall` can be set to False to instead write those
# records out to `.yaml` and `*.yaml` respectively. Note that some
# operating systems may not allow files with those names.
# (optional, default True)
split_catchall: true
# Optional filename with record data to be included in all zones
# populated by this provider. Has no effect when used as a target.
# (optional, default null)
shared_filename: null
# Disable loading of the zone .yaml files.
# (optional, default False)
disable_zonefile: false
Split Details
-------------
All files are stored in a subdirectory matching the name of the zone
(including the trailing .) of the directory config. It is a recommended
best practice that the files be named RECORD.yaml, but all files are
sourced and processed ignoring the filenames so it is up to you how to
organize them.
With `split_extension: .` the directory structure for the zone github.com.
managed under directory "zones/" would look like:
zones/
github.com./
$github.com.yaml
www.yaml
...
Overriding Values
-----------------
Overriding values can be accomplished using multiple yaml providers in the
`sources` list where subsequent providers have `populate_should_replace`
set to `true`. An example use of this would be a zone that you want to push
@ -98,7 +155,6 @@ class YamlProvider(BaseProvider):
You can then sync our records eternally with `--config-file=external.yaml`
and internally (with the custom overrides) with
`--config-file=internal.yaml`
'''
SUPPORTS_GEO = True
@ -107,6 +163,10 @@ class YamlProvider(BaseProvider):
SUPPORTS_DYNAMIC_SUBNETS = True
SUPPORTS_MULTIVALUE_PTR = True
# Any record name added to this set will be included in the catch-all file,
# instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
def __init__(
self,
id,
@ -115,19 +175,27 @@ class YamlProvider(BaseProvider):
enforce_order=True,
populate_should_replace=False,
supports_root_ns=True,
split_extension=False,
split_catchall=True,
shared_filename=False,
disable_zonefile=False,
*args,
**kwargs,
):
klass = self.__class__.__name__
self.log = logging.getLogger(f'{klass}[{id}]')
self.log.debug(
'__init__: id=%s, directory=%s, default_ttl=%d, '
'enforce_order=%d, populate_should_replace=%d',
'__init__: id=%s, directory=%s, default_ttl=%d, enforce_order=%d, populate_should_replace=%s, supports_root_ns=%s, split_extension=%s, split_catchall=%s, shared_filename=%s, disable_zonefile=%s',
id,
directory,
default_ttl,
enforce_order,
populate_should_replace,
supports_root_ns,
split_extension,
split_catchall,
shared_filename,
disable_zonefile,
)
super().__init__(id, *args, **kwargs)
self.directory = directory
@ -135,12 +203,16 @@ class YamlProvider(BaseProvider):
self.enforce_order = enforce_order
self.populate_should_replace = populate_should_replace
self.supports_root_ns = supports_root_ns
self.split_extension = split_extension
self.split_catchall = split_catchall
self.shared_filename = shared_filename
self.disable_zonefile = disable_zonefile
def copy(self):
args = dict(self.__dict__)
args['id'] = f'{args["id"]}-copy'
del args['log']
return self.__class__(**args)
kwargs = dict(self.__dict__)
kwargs['id'] = f'{kwargs["id"]}-copy'
del kwargs['log']
return YamlProvider(**kwargs)
@property
def SUPPORTS(self):
@ -162,6 +234,73 @@ class YamlProvider(BaseProvider):
def SUPPORTS_ROOT_NS(self):
return self.supports_root_ns
    def list_zones(self):
        '''Return a sorted list of zone names discoverable in the directory.

        Considers both split-style zone directories (when split_extension is
        enabled) and whole-zone .yaml files (unless disable_zonefile is set).
        '''
        self.log.debug('list_zones:')
        zones = set()
        extension = self.split_extension
        if extension:
            # we want to leave the .
            trim = len(extension) - 1
            self.log.debug(
                'list_zones: looking for split zones, trim=%d', trim
            )
            for dirname in listdir(self.directory):
                not_ends_with = not dirname.endswith(extension)
                not_dir = not isdir(join(self.directory, dirname))
                if not_dir or not_ends_with:
                    continue
                # strip the extension minus its leading dot; when the
                # extension is just "." trim is 0 and the name is kept whole
                if trim:
                    dirname = dirname[:-trim]
                zones.add(dirname)
        if not self.disable_zonefile:
            self.log.debug('list_zones: looking for zone files')
            for filename in listdir(self.directory):
                not_ends_with = not filename.endswith('.yaml')
                # zone files look like "example.com.yaml", needing >= 2 dots
                too_few_dots = filename.count('.') < 2
                not_file = not isfile(join(self.directory, filename))
                if not_file or not_ends_with or too_few_dots:
                    continue
                # trim off the yaml, leave the .
                zones.add(filename[:-4])
        return sorted(zones)
def _split_sources(self, zone):
ext = self.split_extension
utf8 = join(self.directory, f'{zone.decoded_name[:-1]}{ext}')
idna = join(self.directory, f'{zone.name[:-1]}{ext}')
directory = None
if isdir(utf8):
if utf8 != idna and isdir(idna):
raise ProviderException(
f'Both UTF-8 "{utf8}" and IDNA "{idna}" exist for {zone.decoded_name}'
)
directory = utf8
elif isdir(idna):
directory = idna
else:
return []
for filename in listdir(directory):
if filename.endswith('.yaml'):
yield join(directory, filename)
def _zone_sources(self, zone):
utf8 = join(self.directory, f'{zone.decoded_name}yaml')
idna = join(self.directory, f'{zone.name}yaml')
if isfile(utf8):
if utf8 != idna and isfile(idna):
raise ProviderException(
f'Both UTF-8 "{utf8}" and IDNA "{idna}" exist for {zone.decoded_name}'
)
return utf8
elif isfile(idna):
return idna
return None
def _populate_from_file(self, filename, zone, lenient):
with open(filename, 'r') as fh:
yaml_data = safe_load(fh, enforce_order=self.enforce_order)
@ -184,18 +323,6 @@ class YamlProvider(BaseProvider):
'_populate_from_file: successfully loaded "%s"', filename
)
def get_filenames(self, zone):
return (
join(self.directory, f'{zone.decoded_name}yaml'),
join(self.directory, f'{zone.name}yaml'),
)
def list_zones(self):
for filename in listdir(self.directory):
if not filename.endswith('.yaml') or filename.count('.') < 2:
continue
yield filename[:-4]
def populate(self, zone, target=False, lenient=False):
self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
@ -210,23 +337,29 @@ class YamlProvider(BaseProvider):
return False
before = len(zone.records)
utf8_filename, idna_filename = self.get_filenames(zone)
# we prefer utf8
if isfile(utf8_filename):
if utf8_filename != idna_filename and isfile(idna_filename):
raise ProviderException(
f'Both UTF-8 "{utf8_filename}" and IDNA "{idna_filename}" exist for {zone.decoded_name}'
)
filename = utf8_filename
else:
self.log.warning(
'populate: "%s" does not exist, falling back to try idna version "%s"',
utf8_filename,
idna_filename,
)
filename = idna_filename
self._populate_from_file(filename, zone, lenient)
sources = []
split_extension = self.split_extension
if split_extension:
sources.extend(self._split_sources(zone))
if not self.disable_zonefile:
source = self._zone_sources(zone)
if source:
sources.append(source)
if self.shared_filename:
sources.append(join(self.directory, self.shared_filename))
if not sources:
raise ProviderException(f'no YAMLs found for {zone.decoded_name}')
# determinstically order our sources
sources.sort()
for source in sources:
self._populate_from_file(source, zone, lenient)
self.log.info(
'populate: found %s records, exists=False',
@ -264,123 +397,75 @@ class YamlProvider(BaseProvider):
data[k] = data[k][0]
if not isdir(self.directory):
self.log.debug('_apply: creating directory=%s', self.directory)
makedirs(self.directory)
self._do_apply(desired, data)
def _do_apply(self, desired, data):
filename = join(self.directory, f'{desired.decoded_name}yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(dict(data), fh, allow_unicode=True)
if self.split_extension:
# we're going to do split files
decoded_name = desired.decoded_name[:-1]
directory = join(
self.directory, f'{decoded_name}{self.split_extension}'
)
if not isdir(directory):
self.log.debug('_apply: creating split directory=%s', directory)
makedirs(directory)
catchall = {}
for record, config in data.items():
if self.split_catchall and record in self.CATCHALL_RECORD_NAMES:
catchall[record] = config
continue
filename = join(directory, f'{record}.yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
record_data = {record: config}
safe_dump(record_data, fh)
if catchall:
# Scrub the trailing . to make filenames more sane.
filename = join(directory, f'${decoded_name}.yaml')
self.log.debug(
'_apply: writing catchall filename=%s', filename
)
with open(filename, 'w') as fh:
safe_dump(catchall, fh)
def _list_all_yaml_files(directory):
yaml_files = set()
for f in listdir(directory):
filename = join(directory, f)
if f.endswith('.yaml') and isfile(filename):
yaml_files.add(filename)
return list(yaml_files)
else:
# single large file
filename = join(self.directory, f'{desired.decoded_name}yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(dict(data), fh, allow_unicode=True)
class SplitYamlProvider(YamlProvider):
'''
Core provider for records configured in multiple YAML files on disk.
DEPRECATED: Use YamlProvider with the split_extension parameter instead.
Behaves mostly similarly to YamlConfig, but interacts with multiple YAML
files, instead of a single monolitic one. All files are stored in a
subdirectory matching the name of the zone (including the trailing .) of
the directory config. The files are named RECORD.yaml, except for any
record which cannot be represented easily as a file; these are stored in
the catchall file, which is a YAML file the zone name, prepended with '$'.
For example, a zone, 'github.com.' would have a catch-all file named
'$github.com.yaml'.
When migrating the following configuration options would result in the same
behavior as SplitYamlProvider
A full directory structure for the zone github.com. managed under directory
"zones/" would be:
config:
class: octodns.provider.yaml.YamlProvider
# extension is configured as split_extension
split_extension: .
split_catchall: true
disable_zonefile: true
zones/
github.com./
$github.com.yaml
www.yaml
...
config:
class: octodns.provider.yaml.SplitYamlProvider
# The location of yaml config files (required)
directory: ./config
# The ttl to use for records when not specified in the data
# (optional, default 3600)
default_ttl: 3600
# Whether or not to enforce sorting order on the yaml config
# (optional, default True)
enforce_order: True
TO BE REMOVED: 2.0
'''
# Any record name added to this set will be included in the catch-all file,
# instead of a file matching the record name.
CATCHALL_RECORD_NAMES = ('*', '')
def __init__(self, id, directory, extension='.', *args, **kwargs):
super().__init__(id, directory, *args, **kwargs)
self.extension = extension
def _zone_directory(self, zone):
filename = f'{zone.name[:-1]}{self.extension}'
return join(self.directory, filename)
def list_zones(self):
n = len(self.extension) - 1
for filename in listdir(self.directory):
if not filename.endswith(self.extension):
continue
yield filename[:-n]
def populate(self, zone, target=False, lenient=False):
self.log.debug(
'populate: name=%s, target=%s, lenient=%s',
zone.name,
target,
lenient,
def __init__(self, id, directory, *args, extension='.', **kwargs):
kwargs.update(
{
'split_extension': extension,
'split_catchall': True,
'disable_zonefile': True,
}
)
if target:
# When acting as a target we ignore any existing records so that we
# create a completely new copy
return False
before = len(zone.records)
yaml_filenames = _list_all_yaml_files(self._zone_directory(zone))
self.log.info('populate: found %s YAML files', len(yaml_filenames))
for yaml_filename in yaml_filenames:
self._populate_from_file(yaml_filename, zone, lenient)
self.log.info(
'populate: found %s records, exists=False',
len(zone.records) - before,
super().__init__(id, directory, *args, **kwargs)
self.log.warning(
'__init__: DEPRECATED use YamlProvider with split_extension, split_catchall, and disable_zonefile instead, will go away in v2.0'
)
return False
def _do_apply(self, desired, data):
zone_dir = self._zone_directory(desired)
if not isdir(zone_dir):
makedirs(zone_dir)
catchall = dict()
for record, config in data.items():
if record in self.CATCHALL_RECORD_NAMES:
catchall[record] = config
continue
filename = join(zone_dir, f'{record}.yaml')
self.log.debug('_apply: writing filename=%s', filename)
with open(filename, 'w') as fh:
record_data = {record: config}
safe_dump(record_data, fh)
if catchall:
# Scrub the trailing . to make filenames more sane.
dname = desired.name[:-1]
filename = join(zone_dir, f'${dname}.yaml')
self.log.debug('_apply: writing catchall filename=%s', filename)
with open(filename, 'w') as fh:
safe_dump(catchall, fh)

+ 27
- 5
octodns/record/base.py View File

@ -12,6 +12,12 @@ from .change import Update
from .exception import RecordException, ValidationError
def unquote(s):
    '''Strip one layer of surrounding quotes from *s*, if present.

    Returns *s* unchanged when it is falsy (None and '' pass through) or when
    it is not wrapped in a matching pair of single or double quotes. The
    previous implementation only checked the first character, which corrupted
    unbalanced input (e.g. '"abc' lost its trailing character and a lone
    quote collapsed to the empty string).
    '''
    if s and len(s) >= 2 and s[0] in ('"', "'") and s[-1] == s[0]:
        return s[1:-1]
    return s
class Record(EqualityTupleMixin):
log = getLogger('Record')
@ -113,7 +119,7 @@ class Record(EqualityTupleMixin):
return reasons
@classmethod
def from_rrs(cls, zone, rrs, lenient=False):
def from_rrs(cls, zone, rrs, lenient=False, source=None):
# group records by name & type so that multiple rdatas can be combined
# into a single record when needed
grouped = defaultdict(list)
@ -128,7 +134,9 @@ class Record(EqualityTupleMixin):
name = zone.hostname_from_fqdn(rr.name)
_class = cls._CLASSES[rr._type]
data = _class.data_from_rrs(rrs)
record = Record.new(zone, name, data, lenient=lenient)
record = Record.new(
zone, name, data, lenient=lenient, source=source
)
records.append(record)
return records
@ -223,6 +231,10 @@ class Record(EqualityTupleMixin):
except KeyError:
return 443
@property
def lenient(self):
return self._octodns.get('lenient', False)
def changes(self, other, target):
# We're assuming we have the same name and type if we're being compared
if self.ttl != other.ttl:
@ -304,19 +316,26 @@ class ValuesMixin(object):
return ret
@property
def rr_values(self):
return self.values
@property
def rrs(self):
return (
self.fqdn,
self.ttl,
self._type,
[v.rdata_text for v in self.values],
[v.rdata_text for v in self.rr_values],
)
def __repr__(self):
values = "', '".join([str(v) for v in self.values])
klass = self.__class__.__name__
return f"<{klass} {self._type} {self.ttl}, {self.decoded_fqdn}, ['{values}']>"
octodns = ''
if self._octodns:
octodns = f', {self._octodns}'
return f"<{klass} {self._type} {self.ttl}, {self.decoded_fqdn}, ['{values}']{octodns}>"
class ValueMixin(object):
@ -359,4 +378,7 @@ class ValueMixin(object):
def __repr__(self):
klass = self.__class__.__name__
return f'<{klass} {self._type} {self.ttl}, {self.decoded_fqdn}, {self.value}>'
octodns = ''
if self._octodns:
octodns = f', {self._octodns}'
return f'<{klass} {self._type} {self.ttl}, {self.decoded_fqdn}, {self.value}{octodns}>'

+ 3
- 1
octodns/record/caa.py View File

@ -3,7 +3,7 @@
#
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -20,6 +20,8 @@ class CaaValue(EqualityTupleMixin, dict):
flags = int(flags)
except ValueError:
pass
tag = unquote(tag)
value = unquote(value)
return {'flags': flags, 'tag': tag, 'value': value}
@classmethod


+ 5
- 1
octodns/record/chunked.py View File

@ -18,7 +18,7 @@ class _ChunkedValuesMixin(ValuesMixin):
for i in range(0, len(value), self.CHUNK_SIZE)
]
vs = '" "'.join(vs)
return f'"{vs}"'
return self._value_type(f'"{vs}"')
@property
def chunked_values(self):
@ -27,6 +27,10 @@ class _ChunkedValuesMixin(ValuesMixin):
values.append(self.chunked_value(v))
return values
@property
def rr_values(self):
return self.chunked_values
class _ChunkedValue(str):
_unescaped_semicolon_re = re.compile(r'\w;')


+ 98
- 55
octodns/record/ds.py View File

@ -2,37 +2,40 @@
#
#
from logging import getLogger
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .rr import RrParseError
class DsValue(EqualityTupleMixin, dict):
# https://www.rfc-editor.org/rfc/rfc4034.html#section-2.1
# https://www.rfc-editor.org/rfc/rfc4034.html#section-5.1
log = getLogger('DsValue')
@classmethod
def parse_rdata_text(cls, value):
try:
flags, protocol, algorithm, public_key = value.split(' ')
key_tag, algorithm, digest_type, digest = value.split(' ')
except ValueError:
raise RrParseError()
try:
flags = int(flags)
key_tag = int(key_tag)
except ValueError:
pass
try:
protocol = int(protocol)
algorithm = int(algorithm)
except ValueError:
pass
try:
algorithm = int(algorithm)
digest_type = int(digest_type)
except ValueError:
pass
return {
'flags': flags,
'protocol': protocol,
'key_tag': key_tag,
'algorithm': algorithm,
'public_key': public_key,
'digest_type': digest_type,
'digest': digest,
}
@classmethod
@ -41,26 +44,57 @@ class DsValue(EqualityTupleMixin, dict):
data = (data,)
reasons = []
for value in data:
try:
int(value['flags'])
except KeyError:
reasons.append('missing flags')
except ValueError:
reasons.append(f'invalid flags "{value["flags"]}"')
try:
int(value['protocol'])
except KeyError:
reasons.append('missing protocol')
except ValueError:
reasons.append(f'invalid protocol "{value["protocol"]}"')
try:
int(value['algorithm'])
except KeyError:
reasons.append('missing algorithm')
except ValueError:
reasons.append(f'invalid algorithm "{value["algorithm"]}"')
if 'public_key' not in value:
reasons.append('missing public_key')
# we need to validate both "old" style field names and new
# it is safe to assume if public_key or flags are defined then it is "old" style
# A DS record without public_key doesn't make any sense and shouldn't have validated previously
if "public_key" in value or "flags" in value:
cls.log.warning(
'"algorithm", "flags", "public_key", and "protocol" support is DEPRECATED and will be removed in 2.0'
)
try:
int(value['flags'])
except KeyError:
reasons.append('missing flags')
except ValueError:
reasons.append(f'invalid flags "{value["flags"]}"')
try:
int(value['protocol'])
except KeyError:
reasons.append('missing protocol')
except ValueError:
reasons.append(f'invalid protocol "{value["protocol"]}"')
try:
int(value['algorithm'])
except KeyError:
reasons.append('missing algorithm')
except ValueError:
reasons.append(f'invalid algorithm "{value["algorithm"]}"')
if 'public_key' not in value:
reasons.append('missing public_key')
else:
try:
int(value['key_tag'])
except KeyError:
reasons.append('missing key_tag')
except ValueError:
reasons.append(f'invalid key_tag "{value["key_tag"]}"')
try:
int(value['algorithm'])
except KeyError:
reasons.append('missing algorithm')
except ValueError:
reasons.append(f'invalid algorithm "{value["algorithm"]}"')
try:
int(value['digest_type'])
except KeyError:
reasons.append('missing digest_type')
except ValueError:
reasons.append(
f'invalid digest_type "{value["digest_type"]}"'
)
if 'digest' not in value:
reasons.append('missing digest')
return reasons
@classmethod
@ -68,30 +102,31 @@ class DsValue(EqualityTupleMixin, dict):
return [cls(v) for v in values]
def __init__(self, value):
super().__init__(
{
'flags': int(value['flags']),
'protocol': int(value['protocol']),
# we need to instantiate both based on "old" style field names and new
# it is safe to assume if public_key or flags are defined then it is "old" style
if "public_key" in value or "flags" in value:
init = {
'key_tag': int(value['flags']),
'algorithm': int(value['protocol']),
'digest_type': int(value['algorithm']),
'digest': value['public_key'],
}
else:
init = {
'key_tag': int(value['key_tag']),
'algorithm': int(value['algorithm']),
'public_key': value['public_key'],
'digest_type': int(value['digest_type']),
'digest': value['digest'],
}
)
super().__init__(init)
@property
def flags(self):
return self['flags']
def key_tag(self):
return self['key_tag']
@flags.setter
def flags(self, value):
self['flags'] = value
@property
def protocol(self):
return self['protocol']
@protocol.setter
def protocol(self, value):
self['protocol'] = value
@key_tag.setter
def key_tag(self, value):
self['key_tag'] = value
@property
def algorithm(self):
@ -102,12 +137,20 @@ class DsValue(EqualityTupleMixin, dict):
self['algorithm'] = value
@property
def public_key(self):
return self['public_key']
def digest_type(self):
return self['digest_type']
@digest_type.setter
def digest_type(self, value):
self['digest_type'] = value
@property
def digest(self):
return self['digest']
@public_key.setter
def public_key(self, value):
self['public_key'] = value
@digest.setter
def digest(self, value):
self['digest'] = value
@property
def data(self):
@ -116,15 +159,15 @@ class DsValue(EqualityTupleMixin, dict):
@property
def rdata_text(self):
return (
f'{self.flags} {self.protocol} {self.algorithm} {self.public_key}'
f'{self.key_tag} {self.algorithm} {self.digest_type} {self.digest}'
)
def _equality_tuple(self):
return (self.flags, self.protocol, self.algorithm, self.public_key)
return (self.key_tag, self.algorithm, self.digest_type, self.digest)
def __repr__(self):
return (
f'{self.flags} {self.protocol} {self.algorithm} {self.public_key}'
f'{self.key_tag} {self.algorithm} {self.digest_type} {self.digest}'
)


+ 10
- 4
octodns/record/dynamic.py View File

@ -3,6 +3,7 @@
#
import re
from collections import defaultdict
from logging import getLogger
from .change import Update
@ -220,7 +221,7 @@ class _DynamicMixin(object):
reasons = []
pools_seen = set()
subnets_seen = {}
subnets_seen = defaultdict(dict)
geos_seen = {}
if not isinstance(rules, (list, tuple)):
@ -291,11 +292,16 @@ class _DynamicMixin(object):
# previous loop will log any invalid subnets, here we
# process only valid ones and skip invalid ones
pass
# sort subnets from largest to smallest so that we can
# detect rule that have needlessly targeted a more specific
# subnet along with a larger subnet that already contains it
for subnet in sorted(networks):
for seen, where in subnets_seen.items():
sorted_networks = sorted(
networks, key=lambda n: (n.version, n)
)
for subnet in sorted_networks:
subnets_seen_version = subnets_seen[subnet.version]
for seen, where in subnets_seen_version.items():
if subnet == seen:
reasons.append(
f'rule {rule_num} targets subnet {subnet} which has previously been seen in rule {where}'
@ -305,7 +311,7 @@ class _DynamicMixin(object):
f'rule {rule_num} targets subnet {subnet} which is more specific than the previously seen {seen} in rule {where}'
)
subnets_seen[subnet] = rule_num
subnets_seen_version[subnet] = rule_num
if not isinstance(geos, (list, tuple)):
reasons.append(f'rule {rule_num} geos must be a list')


+ 7
- 5
octodns/record/loc.py View File

@ -3,7 +3,7 @@
#
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -58,21 +58,23 @@ class LocValue(EqualityTupleMixin, dict):
except ValueError:
pass
try:
altitude = float(altitude)
altitude = float(unquote(altitude))
except ValueError:
pass
try:
size = float(size)
size = float(unquote(size))
except ValueError:
pass
try:
precision_horz = float(precision_horz)
precision_horz = float(unquote(precision_horz))
except ValueError:
pass
try:
precision_vert = float(precision_vert)
precision_vert = float(unquote(precision_vert))
except ValueError:
pass
lat_direction = unquote(lat_direction)
long_direction = unquote(long_direction)
return {
'lat_degrees': lat_degrees,
'lat_minutes': lat_minutes,


+ 2
- 1
octodns/record/mx.py View File

@ -6,7 +6,7 @@ from fqdn import FQDN
from ..equality import EqualityTupleMixin
from ..idna import idna_encode
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -21,6 +21,7 @@ class MxValue(EqualityTupleMixin, dict):
preference = int(preference)
except ValueError:
pass
exchange = unquote(exchange)
return {'preference': preference, 'exchange': exchange}
@classmethod


+ 5
- 1
octodns/record/naptr.py View File

@ -3,7 +3,7 @@
#
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -28,6 +28,10 @@ class NaptrValue(EqualityTupleMixin, dict):
preference = int(preference)
except ValueError:
pass
flags = unquote(flags)
service = unquote(service)
regexp = unquote(regexp)
replacement = unquote(replacement)
return {
'order': order,
'preference': preference,


+ 6
- 0
octodns/record/spf.py View File

@ -10,5 +10,11 @@ class SpfRecord(_ChunkedValuesMixin, Record):
_type = 'SPF'
_value_type = _ChunkedValue
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.log.warning(
'The SPF record type is DEPRECATED in favor of TXT values and will become an ValidationError in 2.0'
)
Record.register_type(SpfRecord)

+ 2
- 1
octodns/record/srv.py View File

@ -8,7 +8,7 @@ from fqdn import FQDN
from ..equality import EqualityTupleMixin
from ..idna import idna_encode
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -31,6 +31,7 @@ class SrvValue(EqualityTupleMixin, dict):
port = int(port)
except ValueError:
pass
target = unquote(target)
return {
'priority': priority,
'weight': weight,


+ 2
- 1
octodns/record/sshfp.py View File

@ -3,7 +3,7 @@
#
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -25,6 +25,7 @@ class SshfpValue(EqualityTupleMixin, dict):
fingerprint_type = int(fingerprint_type)
except ValueError:
pass
fingerprint = unquote(fingerprint)
return {
'algorithm': algorithm,
'fingerprint_type': fingerprint_type,


+ 2
- 1
octodns/record/tlsa.py View File

@ -3,7 +3,7 @@
#
from ..equality import EqualityTupleMixin
from .base import Record, ValuesMixin
from .base import Record, ValuesMixin, unquote
from .rr import RrParseError
@ -31,6 +31,7 @@ class TlsaValue(EqualityTupleMixin, dict):
matching_type = int(matching_type)
except ValueError:
pass
certificate_association_data = unquote(certificate_association_data)
return {
'certificate_usage': certificate_usage,
'selector': selector,


+ 12
- 0
octodns/yaml.py View File

@ -2,6 +2,8 @@
#
#
from os.path import dirname, join
from natsort import natsort_keygen
from yaml import SafeDumper, SafeLoader, dump, load
from yaml.constructor import ConstructorError
@ -23,7 +25,17 @@ class ContextLoader(SafeLoader):
def _construct(self, node):
return self._pairs(node)[0]
def include(self, node):
mark = self.get_mark()
directory = dirname(mark.name)
filename = join(directory, self.construct_scalar(node))
with open(filename, 'r') as fh:
return safe_load(fh, self.__class__)
ContextLoader.add_constructor('!include', ContextLoader.include)
ContextLoader.add_constructor(
ContextLoader.DEFAULT_MAPPING_TAG, ContextLoader._construct
)


+ 17
- 18
requirements-dev.txt View File

@ -1,16 +1,14 @@
# DO NOT EDIT THIS FILE DIRECTLY - use ./script/update-requirements to update
Pygments==2.16.1
black==23.7.0
bleach==6.0.0
build==0.10.0
black==23.9.1
build==1.0.3
certifi==2023.7.22
cffi==1.15.1
charset-normalizer==3.2.0
click==8.1.6
cffi==1.16.0
charset-normalizer==3.3.0
click==8.1.7
cmarkgfm==2022.10.27
coverage==7.3.0
coverage==7.3.2
docutils==0.20.1
exceptiongroup==1.1.3
importlib-metadata==6.8.0
iniconfig==2.0.0
isort==5.12.0
@ -20,11 +18,12 @@ markdown-it-py==3.0.0
mdurl==0.1.2
more-itertools==10.1.0
mypy-extensions==1.0.0
packaging==23.1
nh3==0.2.14
packaging==23.2
pathspec==0.11.2
pkginfo==1.9.6
platformdirs==3.10.0
pluggy==1.2.0
platformdirs==3.11.0
pluggy==1.3.0
pprintpp==0.4.0
pycountry-convert==0.7.2
pycountry==22.3.5
@ -34,15 +33,15 @@ pyproject_hooks==1.0.0
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-network==0.0.1
pytest==7.4.0
readme-renderer==40.0
pytest==7.4.2
readme-renderer==42.0
repoze.lru==0.7
requests-toolbelt==1.0.0
requests==2.31.0
rfc3986==2.0.0
rich==13.5.2
tomli==2.0.1
rich==13.6.0
setuptools==68.2.2
twine==4.0.2
urllib3==2.0.4
webencodings==0.5.1
zipp==3.16.2
urllib3==2.0.6
wheel==0.41.2
zipp==3.17.0

+ 1
- 1
script/changelog View File

@ -2,6 +2,6 @@
set -e
VERSION=v$(grep __VERSION__ octodns/__init__.py | sed -e "s/^[^']*'//" -e "s/'$//")
VERSION=v$(grep __version__ octodns/__init__.py | sed -e "s/^[^']*'//" -e "s/'$//")
echo $VERSION
git log --pretty="%h - %cr - %s (%an)" "${VERSION}..HEAD"

+ 2
- 1
script/cibuild View File

@ -16,7 +16,8 @@ fi
echo "## environment & versions ######################################################"
python --version
pip --version
echo "## modules: "
pip freeze
echo "## clean up ####################################################################"
find octodns tests -name "*.pyc" -exec rm {} \;
rm -f *.pyc


script/test-module → script/cibuild-module View File


+ 1
- 0
script/cibuild-setup-py View File

@ -7,6 +7,7 @@ echo "## create test venv ######################################################
TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX)
python3 -m venv $TMP_DIR
. "$TMP_DIR/bin/activate"
pip install setuptools
echo "## environment & versions ######################################################"
python --version
pip --version


+ 11
- 2
script/release View File

@ -32,12 +32,21 @@ fi
# Set so that setup.py will create a public release style version number
export OCTODNS_RELEASE=1
VERSION="$(grep "^__VERSION__" "$ROOT/octodns/__init__.py" | sed -e "s/.* = '//" -e "s/'$//")"
VERSION="$(grep "^__version__" "$ROOT/octodns/__init__.py" | sed -e "s/.* = '//" -e "s/'$//")"
git tag -s "v$VERSION" -m "Release $VERSION"
git push origin "v$VERSION"
echo "Tagged and pushed v$VERSION"
python -m build --sdist --wheel
TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX)
git archive --format tar "v$VERSION" | tar xv -C $TMP_DIR
echo "Created clean room $TMP_DIR and archived $VERSION into it"
(cd "$TMP_DIR" && python -m build --sdist --wheel)
cp $TMP_DIR/dist/*$VERSION.tar.gz $TMP_DIR/dist/*$VERSION*.whl dist/
echo "Copied $TMP_DIR/dists into ./dist"
twine check dist/*$VERSION.tar.gz dist/*$VERSION*.whl
twine upload dist/*$VERSION.tar.gz dist/*$VERSION*.whl
echo "Uploaded $VERSION"

+ 0
- 15
script/sdist View File

@ -1,15 +0,0 @@
#!/bin/bash
set -e
if ! git diff-index --quiet HEAD --; then
echo "Changes in local directory, commit or clear" >&2
exit 1
fi
SHA=$(git rev-parse HEAD)
python setup.py sdist
TARBALL="dist/octodns-$SHA.tar.gz"
mv dist/octodns-0.*.tar.gz "$TARBALL"
echo "Created $TARBALL"

+ 1
- 16
setup.py View File

@ -1,9 +1,7 @@
#!/usr/bin/env python
from io import StringIO
from os import environ
from os.path import dirname, join
from subprocess import CalledProcessError, check_output
import octodns
@ -49,19 +47,6 @@ def long_description():
return buf.getvalue()
def version():
# pep440 style public & local version numbers
if environ.get('OCTODNS_RELEASE', False):
# public
return octodns.__VERSION__
try:
sha = check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8')[:8]
except (CalledProcessError, FileNotFoundError):
sha = 'unknown'
# local
return f'{octodns.__VERSION__}+{sha}'
tests_require = ('pytest>=6.2.5', 'pytest-cov>=3.0.0', 'pytest-network>=0.0.1')
setup(
@ -102,5 +87,5 @@ setup(
python_requires='>=3.8',
tests_require=tests_require,
url='https://github.com/octodns/octodns',
version=version(),
version=octodns.__version__,
)

+ 11
- 1
tests/config/dynamic-config.yaml View File

@ -3,17 +3,27 @@ providers:
class: octodns.provider.yaml.YamlProvider
directory: tests/config
in2:
class: octodns.provider.yaml.YamlProvider
directory: tests/config/split
dump:
class: octodns.provider.yaml.YamlProvider
directory: env/YAML_TMP_DIR
zones:
'*':
'*.one':
sources:
- in
targets:
- dump
'*.two':
sources:
- in2
targets:
- dump
subzone.unit.tests.:
sources:
- in


+ 4
- 0
tests/config/hybrid/one.test.yaml View File

@ -0,0 +1,4 @@
---
flat-zone-file:
type: TXT
value: non-split flat zone file

+ 4
- 0
tests/config/hybrid/two.test./$two.test.yaml View File

@ -0,0 +1,4 @@
---
'':
type: TXT
value: root TXT

+ 4
- 0
tests/config/hybrid/two.test./split-zone-file.yaml View File

@ -0,0 +1,4 @@
---
split-zone-file:
type: TXT
value: split zone file

+ 5
- 0
tests/config/include/array.yaml View File

@ -0,0 +1,5 @@
---
- 14
- 15
- 16
- 72

+ 3
- 0
tests/config/include/dict.yaml View File

@ -0,0 +1,3 @@
---
k: v
z: 42

+ 1
- 0
tests/config/include/empty.yaml View File

@ -0,0 +1 @@
---

+ 2
- 0
tests/config/include/include-doesnt-exist.yaml View File

@ -0,0 +1,2 @@
---
key: !include does-not-exist.yaml

+ 8
- 0
tests/config/include/main.yaml View File

@ -0,0 +1,8 @@
---
included-array: !include array.yaml
included-dict: !include dict.yaml
included-empty: !include empty.yaml
included-nested: !include nested.yaml
included-subdir: !include subdir/value.yaml
key: value
name: main

+ 2
- 0
tests/config/include/nested.yaml View File

@ -0,0 +1,2 @@
---
!include subdir/value.yaml

+ 2
- 0
tests/config/include/subdir/value.yaml View File

@ -0,0 +1,2 @@
---
Hello World!

+ 1
- 0
tests/config/simple-arpa.yaml View File

@ -1,6 +1,7 @@
manager:
max_workers: 2
auto_arpa:
populate_should_replace: True
ttl: 1800
providers:


+ 4
- 0
tests/config/split/shared.yaml View File

@ -0,0 +1,4 @@
---
only-shared:
type: TXT
value: Only included when shared file processing is enabled

+ 4
- 0
tests/config/split/unit.tests.yaml View File

@ -0,0 +1,4 @@
---
only-zone-file:
type: TXT
value: Only included when zone file processing is enabled

+ 21
- 5
tests/test_octodns_manager.py View File

@ -16,7 +16,7 @@ from helpers import (
TemporaryDirectory,
)
from octodns import __VERSION__
from octodns import __version__
from octodns.idna import IdnaDict, idna_encode
from octodns.manager import (
MainThreadExecutor,
@ -746,13 +746,13 @@ class TestManager(TestCase):
manager = Manager(get_config_filename('simple.yaml'))
class DummyModule(object):
__VERSION__ = '2.3.4'
__version__ = '2.3.4'
dummy_module = DummyModule()
# use importlib.metadata.version
self.assertTrue(
__VERSION__,
__version__,
manager._try_version(
'octodns', module=dummy_module, version='1.2.3'
),
@ -928,6 +928,18 @@ class TestManager(TestCase):
def test_auto_arpa(self):
manager = Manager(get_config_filename('simple-arpa.yaml'))
# provider config
self.assertEqual(
True, manager.providers.get("auto-arpa").populate_should_replace
)
self.assertEqual(1800, manager.providers.get("auto-arpa").ttl)
# processor config
self.assertEqual(
True, manager.processors.get("auto-arpa").populate_should_replace
)
self.assertEqual(1800, manager.processors.get("auto-arpa").ttl)
with TemporaryDirectory() as tmpdir:
environ['YAML_TMP_DIR'] = tmpdir.dirname
@ -985,10 +997,14 @@ class TestManager(TestCase):
manager = Manager(get_config_filename('dynamic-config.yaml'))
# just unit.tests. which should have been dynamically configured via
# two zones which should have been dynamically configured via
# list_zones
self.assertEqual(
23, manager.sync(eligible_zones=['unit.tests.'], dry_run=False)
29,
manager.sync(
eligible_zones=['unit.tests.', 'dynamic.tests.'],
dry_run=False,
),
)
# just subzone.unit.tests. which was explicitly configured


+ 201
- 1
tests/test_octodns_processor_filter.py View File

@ -5,13 +5,19 @@
from unittest import TestCase
from octodns.processor.filter import (
ExcludeRootNsChanges,
IgnoreRootNsFilter,
NameAllowlistFilter,
NameRejectlistFilter,
NetworkValueAllowlistFilter,
NetworkValueRejectlistFilter,
TypeAllowlistFilter,
TypeRejectlistFilter,
ZoneNameFilter,
)
from octodns.record import Record
from octodns.provider.plan import Plan
from octodns.record import Record, Update
from octodns.record.exception import ValidationError
from octodns.zone import Zone
zone = Zone('unit.tests.', [])
@ -50,6 +56,22 @@ class TestTypeAllowListFilter(TestCase):
['a', 'a2', 'aaaa'], sorted([r.name for r in got.records])
)
def test_include_target(self):
filter_txt = TypeAllowlistFilter(
'only-txt', ['TXT'], include_target=False
)
# as a source we don't see them
got = filter_txt.process_source_zone(zone.copy())
self.assertEqual(['txt', 'txt2'], sorted([r.name for r in got.records]))
# but as a target we do b/c it's not included
got = filter_txt.process_target_zone(zone.copy())
self.assertEqual(
['a', 'a2', 'aaaa', 'txt', 'txt2'],
sorted([r.name for r in got.records]),
)
class TestTypeRejectListFilter(TestCase):
def test_basics(self):
@ -157,6 +179,66 @@ class TestNameRejectListFilter(TestCase):
)
class TestNetworkValueFilter(TestCase):
zone = Zone('unit.tests.', [])
for record in [
Record.new(
zone,
'private-ipv4',
{'type': 'A', 'ttl': 42, 'value': '10.42.42.42'},
),
Record.new(
zone,
'public-ipv4',
{'type': 'A', 'ttl': 42, 'value': '42.42.42.42'},
),
Record.new(
zone,
'private-ipv6',
{'type': 'AAAA', 'ttl': 42, 'value': 'fd12:3456:789a:1::1'},
),
Record.new(
zone,
'public-ipv6',
{'type': 'AAAA', 'ttl': 42, 'value': 'dead:beef:cafe::1'},
),
Record.new(
zone,
'keep-me',
{'ttl': 30, 'type': 'TXT', 'value': 'this should always be here'},
),
]:
zone.add_record(record)
def test_bad_config(self):
with self.assertRaises(ValueError):
NetworkValueRejectlistFilter(
'rejectlist', set(('string', '42.42.42.42/43'))
)
def test_reject(self):
filter_private = NetworkValueRejectlistFilter(
'rejectlist', set(('10.0.0.0/8', 'fd00::/8'))
)
got = filter_private.process_source_zone(self.zone.copy())
self.assertEqual(
['keep-me', 'public-ipv4', 'public-ipv6'],
sorted([r.name for r in got.records]),
)
def test_allow(self):
filter_private = NetworkValueAllowlistFilter(
'allowlist', set(('10.0.0.0/8', 'fd00::/8'))
)
got = filter_private.process_source_zone(self.zone.copy())
self.assertEqual(
['keep-me', 'private-ipv4', 'private-ipv6'],
sorted([r.name for r in got.records]),
)
class TestIgnoreRootNsFilter(TestCase):
zone = Zone('unit.tests.', [])
root = Record.new(
@ -180,3 +262,121 @@ class TestIgnoreRootNsFilter(TestCase):
[('A', ''), ('NS', 'sub')],
sorted([(r._type, r.name) for r in filtered.records]),
)
class TestExcludeRootNsChanges(TestCase):
zone = Zone('unit.tests.', [])
root = Record.new(
zone, '', {'type': 'NS', 'ttl': 42, 'value': 'ns1.unit.tests.'}
)
zone.add_record(root)
not_root = Record.new(
zone, 'sub', {'type': 'NS', 'ttl': 43, 'value': 'ns2.unit.tests.'}
)
zone.add_record(not_root)
not_ns = Record.new(zone, '', {'type': 'A', 'ttl': 42, 'value': '3.4.5.6'})
zone.add_record(not_ns)
changes_with_root = [
Update(root, root),
Update(not_root, not_root),
Update(not_ns, not_ns),
]
plan_with_root = Plan(zone, zone, changes_with_root, True)
changes_without_root = [Update(not_root, not_root), Update(not_ns, not_ns)]
plan_without_root = Plan(zone, zone, changes_without_root, True)
def test_no_plan(self):
proc = ExcludeRootNsChanges('exclude-root')
self.assertFalse(proc.process_plan(None, None, None))
def test_error(self):
proc = ExcludeRootNsChanges('exclude-root')
with self.assertRaises(ValidationError) as ctx:
proc.process_plan(self.plan_with_root, None, None)
self.assertEqual(
['root NS changes are disallowed'], ctx.exception.reasons
)
self.assertEqual(
self.plan_without_root,
proc.process_plan(self.plan_without_root, None, None),
)
def test_warning(self):
proc = ExcludeRootNsChanges('exclude-root', error=False)
filtered_plan = proc.process_plan(self.plan_with_root, None, None)
self.assertEqual(self.plan_without_root.changes, filtered_plan.changes)
self.assertEqual(
self.plan_without_root,
proc.process_plan(self.plan_without_root, None, None),
)
class TestZoneNameFilter(TestCase):
def test_ends_with_zone(self):
zone_name_filter = ZoneNameFilter('zone-name', error=False)
zone = Zone('unit.tests.', [])
# something that doesn't come into play
zone.add_record(
Record.new(
zone, 'www', {'type': 'A', 'ttl': 43, 'value': '1.2.3.4'}
)
)
# something that has the zone name, but doesn't end with it
zone.add_record(
Record.new(
zone,
f'{zone.name}more',
{'type': 'A', 'ttl': 43, 'value': '1.2.3.4'},
)
)
self.assertEqual(2, len(zone.records))
filtered = zone_name_filter.process_source_zone(zone.copy())
# get everything back
self.assertEqual(2, len(filtered.records))
with_dot = zone.copy()
with_dot.add_record(
Record.new(
zone, zone.name, {'type': 'A', 'ttl': 43, 'value': '1.2.3.4'}
)
)
self.assertEqual(3, len(with_dot.records))
filtered = zone_name_filter.process_source_zone(with_dot.copy())
# don't get the one that ends with the zone name
self.assertEqual(2, len(filtered.records))
without_dot = zone.copy()
without_dot.add_record(
Record.new(
zone,
zone.name[:-1],
{'type': 'A', 'ttl': 43, 'value': '1.2.3.4'},
)
)
self.assertEqual(3, len(without_dot.records))
filtered = zone_name_filter.process_source_zone(without_dot.copy())
# don't get the one that ends with the zone name
self.assertEqual(2, len(filtered.records))
def test_error(self):
errors = ZoneNameFilter('zone-name', error=True)
zone = Zone('unit.tests.', [])
zone.add_record(
Record.new(
zone, zone.name, {'type': 'A', 'ttl': 43, 'value': '1.2.3.4'}
)
)
with self.assertRaises(ValidationError) as ctx:
errors.process_source_zone(zone)
self.assertEqual(
['record name ends with zone name'], ctx.exception.reasons
)

+ 202
- 0
tests/test_octodns_processor_meta.py View File

@ -0,0 +1,202 @@
#
#
#
from unittest import TestCase
from unittest.mock import patch
from octodns import __version__
from octodns.processor.meta import MetaProcessor
from octodns.provider.plan import Plan
from octodns.record import Create, Record, Update
from octodns.zone import Zone
class TestMetaProcessor(TestCase):
zone = Zone('unit.tests.', [])
meta_needs_update = Record.new(
zone,
'meta',
{
'type': 'TXT',
'ttl': 60,
# will always need updating
'values': ['uuid'],
},
)
meta_up_to_date = Record.new(
zone,
'meta',
{
'type': 'TXT',
'ttl': 60,
# only has time, value should be ignored
'values': ['time=xxx'],
},
)
not_meta = Record.new(
zone,
'its-not-meta',
{
'type': 'TXT',
'ttl': 60,
# has time, but name is wrong so won't matter
'values': ['time=xyz'],
},
)
@patch('octodns.processor.meta.MetaProcessor.now')
@patch('octodns.processor.meta.MetaProcessor.uuid')
def test_args_and_values(self, uuid_mock, now_mock):
# defaults, just time
uuid_mock.side_effect = [Exception('not used')]
now_mock.side_effect = ['the-time']
proc = MetaProcessor('test')
self.assertEqual(['time=the-time'], proc.values)
# just uuid
uuid_mock.side_effect = ['abcdef-1234567890']
now_mock.side_effect = [Exception('not used')]
proc = MetaProcessor('test', include_time=False, include_uuid=True)
self.assertEqual(['uuid=abcdef-1234567890'], proc.values)
# just version
uuid_mock.side_effect = [Exception('not used')]
now_mock.side_effect = [Exception('not used')]
proc = MetaProcessor('test', include_time=False, include_version=True)
self.assertEqual([f'octodns-version={__version__}'], proc.values)
# just provider
proc = MetaProcessor('test', include_time=False, include_provider=True)
self.assertTrue(proc.include_provider)
self.assertFalse(proc.values)
# everything
uuid_mock.side_effect = ['abcdef-1234567890']
now_mock.side_effect = ['the-time']
proc = MetaProcessor(
'test',
include_time=True,
include_uuid=True,
include_version=True,
include_provider=True,
)
self.assertEqual(
[
f'octodns-version={__version__}',
'time=the-time',
'uuid=abcdef-1234567890',
],
proc.values,
)
self.assertTrue(proc.include_provider)
def test_uuid(self):
proc = MetaProcessor('test', include_time=False, include_uuid=True)
self.assertEqual(1, len(proc.values))
self.assertTrue(proc.values[0].startswith('uuid'))
# uuid's have 4 -
self.assertEqual(4, proc.values[0].count('-'))
def test_up_to_date(self):
proc = MetaProcessor('test')
# Creates always need to happen
self.assertFalse(proc._up_to_date(Create(self.meta_needs_update)))
self.assertFalse(proc._up_to_date(Create(self.meta_up_to_date)))
# Updates depend on the contents
self.assertFalse(proc._up_to_date(Update(self.meta_needs_update, None)))
self.assertTrue(proc._up_to_date(Update(self.meta_up_to_date, None)))
@patch('octodns.processor.meta.MetaProcessor.now')
def test_process_source_zone(self, now_mock):
now_mock.side_effect = ['the-time']
proc = MetaProcessor('test')
# meta record was added
desired = self.zone.copy()
processed = proc.process_source_zone(desired, None)
record = next(iter(processed.records))
self.assertEqual(self.meta_up_to_date, record)
self.assertEqual(['time=the-time'], record.values)
def test_process_target_zone(self):
proc = MetaProcessor('test')
# with defaults, not enabled
zone = self.zone.copy()
processed = proc.process_target_zone(zone, None)
self.assertFalse(processed.records)
# enable provider
proc = MetaProcessor('test', include_provider=True)
class DummyTarget:
id = 'dummy'
# enabled provider, no meta record, shouldn't happen, but also shouldn't
# blow up
processed = proc.process_target_zone(zone, DummyTarget())
self.assertFalse(processed.records)
# enabled provider, should now look for and update the provider value,
# - only record so nothing to skip over
# - time value in there to be skipped over
proc = MetaProcessor('test', include_provider=True)
zone = self.zone.copy()
meta = self.meta_up_to_date.copy()
zone.add_record(meta)
processed = proc.process_target_zone(zone, DummyTarget())
record = next(iter(processed.records))
self.assertEqual(['provider=dummy', 'time=xxx'], record.values)
# add another unrelated record that needs to be skipped
proc = MetaProcessor('test', include_provider=True)
zone = self.zone.copy()
meta = self.meta_up_to_date.copy()
zone.add_record(meta)
zone.add_record(self.not_meta)
processed = proc.process_target_zone(zone, DummyTarget())
self.assertEqual(2, len(processed.records))
record = [r for r in processed.records if r.name == proc.record_name][0]
self.assertEqual(['provider=dummy', 'time=xxx'], record.values)
def test_process_plan(self):
proc = MetaProcessor('test')
# no plan, shouldn't happen, but we shouldn't blow up
self.assertFalse(proc.process_plan(None, None, None))
# plan with just an up to date meta record, should kill off the plan
plan = Plan(
None,
None,
[Update(self.meta_up_to_date, self.meta_needs_update)],
True,
)
self.assertFalse(proc.process_plan(plan, None, None))
# plan with an out of date meta record, should leave the plan alone
plan = Plan(
None,
None,
[Update(self.meta_needs_update, self.meta_up_to_date)],
True,
)
self.assertEqual(plan, proc.process_plan(plan, None, None))
# plan with other changes preserved even if meta was somehow up to date
plan = Plan(
None,
None,
[
Update(self.meta_up_to_date, self.meta_needs_update),
Create(self.not_meta),
],
True,
)
self.assertEqual(plan, proc.process_plan(plan, None, None))

+ 214
- 38
tests/test_octodns_provider_yaml.py View File

@ -2,8 +2,9 @@
#
#
from os import makedirs
from os.path import basename, dirname, isdir, isfile, join
from os import makedirs, remove
from os.path import dirname, isdir, isfile, join
from shutil import rmtree
from unittest import TestCase
from helpers import TemporaryDirectory
@ -12,16 +13,15 @@ from yaml.constructor import ConstructorError
from octodns.idna import idna_encode
from octodns.provider import ProviderException
from octodns.provider.base import Plan
from octodns.provider.yaml import (
SplitYamlProvider,
YamlProvider,
_list_all_yaml_files,
)
from octodns.provider.yaml import SplitYamlProvider, YamlProvider
from octodns.record import Create, NsValue, Record, ValuesMixin
from octodns.zone import SubzoneRecordException, Zone
def touch(filename):
open(filename, 'w').close()
class TestYamlProvider(TestCase):
def test_provider(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
@ -299,6 +299,7 @@ xn--dj-kia8a:
self.assertTrue(source.supports(DummyType(self)))
def test_list_zones(self):
# test of pre-existing config that lives on disk
provider = YamlProvider('test', 'tests/config')
self.assertEqual(
[
@ -307,9 +308,159 @@ xn--dj-kia8a:
'subzone.unit.tests.',
'unit.tests.',
],
sorted(provider.list_zones()),
list(provider.list_zones()),
)
# some synthetic tests to explicitly exercise the full functionality
with TemporaryDirectory() as td:
directory = join(td.dirname)
# noise
touch(join(directory, 'README.txt'))
# not a zone.name.yaml
touch(join(directory, 'production.yaml'))
# non-zone directories
makedirs(join(directory, 'directory'))
makedirs(join(directory, 'never.matches'))
# basic yaml zone files
touch(join(directory, 'unit.test.yaml'))
touch(join(directory, 'sub.unit.test.yaml'))
touch(join(directory, 'other.tld.yaml'))
touch(join(directory, 'both.tld.yaml'))
# split zones with .
makedirs(join(directory, 'split.test.'))
makedirs(join(directory, 'sub.split.test.'))
makedirs(join(directory, 'other.split.'))
makedirs(join(directory, 'both.tld.'))
# split zones with .tst
makedirs(join(directory, 'split-ext.test.tst'))
makedirs(join(directory, 'sub.split-ext.test.tst'))
makedirs(join(directory, 'other-ext.split.tst'))
provider = YamlProvider('test', directory)
# basic, should only find zone files
self.assertEqual(
['both.tld.', 'other.tld.', 'sub.unit.test.', 'unit.test.'],
list(provider.list_zones()),
)
# include stuff with . AND basic
provider.split_extension = '.'
self.assertEqual(
[
'both.tld.',
'other.split.',
'other.tld.',
'split.test.',
'sub.split.test.',
'sub.unit.test.',
'unit.test.',
],
list(provider.list_zones()),
)
# include stuff with .tst AND basic
provider.split_extension = '.tst'
self.assertEqual(
[
'both.tld.',
'other-ext.split.',
'other.tld.',
'split-ext.test.',
'sub.split-ext.test.',
'sub.unit.test.',
'unit.test.',
],
list(provider.list_zones()),
)
# only .tst
provider.disable_zonefile = True
self.assertEqual(
['other-ext.split.', 'split-ext.test.', 'sub.split-ext.test.'],
list(provider.list_zones()),
)
# only . (and both zone)
provider.split_extension = '.'
self.assertEqual(
['both.tld.', 'other.split.', 'split.test.', 'sub.split.test.'],
list(provider.list_zones()),
)
def test_split_sources(self):
with TemporaryDirectory() as td:
directory = join(td.dirname)
provider = YamlProvider('test', directory, split_extension='.')
zone = Zone('déjà.vu.', [])
zone_utf8 = join(directory, f'{zone.decoded_name}')
zone_idna = join(directory, f'{zone.name}')
filenames = (
'*.yaml',
'.yaml',
'www.yaml',
f'${zone.decoded_name}yaml',
)
# create the utf8 zone dir
makedirs(zone_utf8)
# nothing in it so we should get nothing back
self.assertEqual([], list(provider._split_sources(zone)))
# create some record files
for filename in filenames:
touch(join(zone_utf8, filename))
# make sure we see them
expected = [join(zone_utf8, f) for f in sorted(filenames)]
self.assertEqual(expected, sorted(provider._split_sources(zone)))
# add a idna zone directory
makedirs(zone_idna)
for filename in filenames:
touch(join(zone_idna, filename))
with self.assertRaises(ProviderException) as ctx:
list(provider._split_sources(zone))
msg = str(ctx.exception)
self.assertTrue('Both UTF-8' in msg)
# delete the utf8 version
rmtree(zone_utf8)
expected = [join(zone_idna, f) for f in sorted(filenames)]
self.assertEqual(expected, sorted(provider._split_sources(zone)))
def test_zone_sources(self):
with TemporaryDirectory() as td:
directory = join(td.dirname)
provider = YamlProvider('test', directory)
zone = Zone('déjà.vu.', [])
utf8 = join(directory, f'{zone.decoded_name}yaml')
idna = join(directory, f'{zone.name}yaml')
# create the utf8 version
touch(utf8)
# make sure that's what we get back
self.assertEqual(utf8, provider._zone_sources(zone))
# create idna version, both exists
touch(idna)
with self.assertRaises(ProviderException) as ctx:
provider._zone_sources(zone)
msg = str(ctx.exception)
self.assertTrue('Both UTF-8' in msg)
# delete the utf8 version
remove(utf8)
# make sure that we get the idna one back
self.assertEqual(idna, provider._zone_sources(zone))
class TestSplitYamlProvider(TestCase):
def test_list_all_yaml_files(self):
@ -323,40 +474,16 @@ class TestSplitYamlProvider(TestCase):
# Create some files, some of them with a .yaml extension, all of
# them empty.
for emptyfile in all_files:
open(join(directory, emptyfile), 'w').close()
touch(join(directory, emptyfile))
# Do the same for some fake directories
for emptydir in all_dirs:
makedirs(join(directory, emptydir))
# This isn't great, but given the variable nature of the temp dir
# names, it's necessary.
d = list(basename(f) for f in _list_all_yaml_files(directory))
d = [join(directory, f) for f in yaml_files]
self.assertEqual(len(yaml_files), len(d))
def test_zone_directory(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'), extension='.tst'
)
zone = Zone('unit.tests.', [])
self.assertEqual(
join(dirname(__file__), 'config/split', 'unit.tests.tst'),
source._zone_directory(zone),
)
def test_apply_handles_existing_zone_directory(self):
with TemporaryDirectory() as td:
provider = SplitYamlProvider(
'test', join(td.dirname, 'config'), extension='.tst'
)
makedirs(join(td.dirname, 'config', 'does.exist.tst'))
zone = Zone('does.exist.', [])
self.assertTrue(isdir(provider._zone_directory(zone)))
provider.apply(Plan(None, zone, [], True))
self.assertTrue(isdir(provider._zone_directory(zone)))
def test_provider(self):
source = SplitYamlProvider(
'test',
@ -375,9 +502,40 @@ class TestSplitYamlProvider(TestCase):
# without it we see everything
source.populate(zone)
self.assertEqual(20, len(zone.records))
self.assertFalse([r for r in zone.records if r.name.startswith('only')])
# temporarily enable zone file processing too, we should see one extra
# record that came from unit.tests.
source.disable_zonefile = False
zone_both = Zone('unit.tests.', [])
source.populate(zone_both)
self.assertEqual(21, len(zone_both.records))
n = len([r for r in zone_both.records if r.name == 'only-zone-file'])
self.assertEqual(1, n)
source.disable_zonefile = True
# temporarily enable shared file processing, we should see one extra
# record in the zone
source.shared_filename = 'shared.yaml'
zone_shared = Zone('unit.tests.', [])
source.populate(zone_shared)
self.assertEqual(21, len(zone_shared.records))
n = len([r for r in zone_shared.records if r.name == 'only-shared'])
self.assertEqual(1, n)
dynamic_zone_shared = Zone('dynamic.tests.', [])
source.populate(dynamic_zone_shared)
self.assertEqual(6, len(dynamic_zone_shared.records))
n = len(
[r for r in dynamic_zone_shared.records if r.name == 'only-shared']
)
self.assertEqual(1, n)
source.shared_filename = None
source.populate(dynamic_zone)
self.assertEqual(5, len(dynamic_zone.records))
self.assertFalse(
[r for r in dynamic_zone.records if r.name.startswith('only')]
)
with TemporaryDirectory() as td:
# Add some subdirs to make sure that it can create them
@ -505,8 +663,8 @@ class TestSplitYamlProvider(TestCase):
zone = Zone('empty.', [])
# without it we see everything
source.populate(zone)
self.assertEqual(0, len(zone.records))
with self.assertRaises(ProviderException):
source.populate(zone)
def test_unsorted(self):
source = SplitYamlProvider(
@ -579,7 +737,7 @@ class TestSplitYamlProvider(TestCase):
)
copy = source.copy()
self.assertEqual(source.directory, copy.directory)
self.assertEqual(source.extension, copy.extension)
self.assertEqual(source.split_extension, copy.split_extension)
self.assertEqual(source.default_ttl, copy.default_ttl)
self.assertEqual(source.enforce_order, copy.enforce_order)
self.assertEqual(
@ -602,6 +760,24 @@ class TestSplitYamlProvider(TestCase):
sorted(provider.list_zones()),
)
def test_hybrid_directory(self):
source = YamlProvider(
'test',
join(dirname(__file__), 'config/hybrid'),
split_extension='.',
strict_supports=False,
)
# flat zone file only
zone = Zone('one.test.', [])
source.populate(zone)
self.assertEqual(1, len(zone.records))
# split zone only
zone = Zone('two.test.', [])
source.populate(zone)
self.assertEqual(2, len(zone.records))
class TestOverridingYamlProvider(TestCase):
def test_provider(self):


+ 65
- 1
tests/test_octodns_record.py View File

@ -22,6 +22,7 @@ from octodns.record import (
ValidationError,
ValuesMixin,
)
from octodns.record.base import unquote
from octodns.yaml import ContextDict
from octodns.zone import Zone
@ -159,10 +160,13 @@ class TestRecord(TestCase):
)
zone = Zone('unit.tests.', [])
records = {(r._type, r.name): r for r in Record.from_rrs(zone, rrs)}
records = {
(r._type, r.name): r for r in Record.from_rrs(zone, rrs, source=99)
}
record = records[('A', '')]
self.assertEqual(42, record.ttl)
self.assertEqual(['1.2.3.4', '2.3.4.5'], record.values)
self.assertEqual(99, record.source)
record = records[('AAAA', '')]
self.assertEqual(43, record.ttl)
self.assertEqual(['fc00::1', 'fc00::2'], record.values)
@ -409,6 +413,18 @@ class TestRecord(TestCase):
record.rrs,
)
def test_unquote(self):
    """unquote strips one layer of matching outer quotes when present."""
    inner = 'Hello "\'"World!'
    # bare, single-quoted, and double-quoted forms all unquote to the
    # same inner string
    for wrapped in (inner, "'" + inner + "'", '"' + inner + '"'):
        self.assertEqual(inner, unquote(wrapped))
    # edge cases: None and the empty string pass through untouched
    self.assertEqual(None, unquote(None))
    self.assertEqual('', unquote(''))
class TestRecordValidation(TestCase):
zone = Zone('unit.tests.', [])
@ -638,3 +654,51 @@ class TestRecordValidation(TestCase):
),
)
self.assertEqual('needle', record.context)
def test_values_mixin_repr(self):
    """ValuesMixin repr appends the octodns section only when it's non-empty."""
    record = Record.new(
        self.zone,
        'www',
        {
            'ttl': 42,
            'type': 'A',
            'values': ['1.2.3.4', '2.3.4.5'],
            'octodns': {'key': 'value'},
        },
    )
    # with the octodns special section populated it is shown at the end
    self.assertEqual(
        "<ARecord A 42, www.unit.tests., ['1.2.3.4', '2.3.4.5'], {'key': 'value'}>",
        repr(record),
    )
    # once emptied, the section is omitted entirely
    record._octodns = {}
    self.assertEqual(
        "<ARecord A 42, www.unit.tests., ['1.2.3.4', '2.3.4.5']>",
        repr(record),
    )
def test_value_mixin_repr(self):
    """ValueMixin repr appends the octodns section only when it's non-empty."""
    record = Record.new(
        self.zone,
        'pointer',
        {
            'ttl': 43,
            'type': 'CNAME',
            'value': 'unit.tests.',
            'octodns': {'key': 42},
        },
    )
    # with the octodns special section populated it is shown at the end
    self.assertEqual(
        "<CnameRecord CNAME 43, pointer.unit.tests., unit.tests., {'key': 42}>",
        repr(record),
    )
    # once emptied, the section is omitted entirely
    record._octodns = {}
    self.assertEqual(
        '<CnameRecord CNAME 43, pointer.unit.tests., unit.tests.>',
        repr(record),
    )

+ 6
- 0
tests/test_octodns_record_caa.py View File

@ -105,6 +105,12 @@ class TestRecordCaa(TestCase):
CaaValue.parse_rdata_text('0 tag 99148c81'),
)
# quoted
self.assertEqual(
{'flags': 0, 'tag': 'tag', 'value': '99148c81'},
CaaValue.parse_rdata_text('0 "tag" "99148c81"'),
)
zone = Zone('unit.tests.', [])
a = CaaRecord(
zone,


+ 2
- 0
tests/test_octodns_record_chunked.py View File

@ -21,6 +21,8 @@ class TestRecordChunked(TestCase):
'some.words.that.here',
'1.2.word.4',
'1.2.3.4',
# quotes are not removed
'"Hello World!"',
):
self.assertEqual(s, _ChunkedValue.parse_rdata_text(s))


+ 140
- 85
tests/test_octodns_record_ds.py View File

@ -12,52 +12,67 @@ from octodns.zone import Zone
class TestRecordDs(TestCase):
def test_ds(self):
for a, b in (
# diff flags
# diff key_tag
(
{
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': 'abcdef0123456',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'abcdef0123456',
},
{
'flags': 1,
'protocol': 1,
'algorithm': 2,
'public_key': 'abcdef0123456',
'key_tag': 1,
'algorithm': 1,
'digest_type': 2,
'digest': 'abcdef0123456',
},
),
# diff protocol
# diff algorithm
(
{
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': 'abcdef0123456',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'abcdef0123456',
},
{
'flags': 0,
'protocol': 2,
'key_tag': 0,
'algorithm': 2,
'public_key': 'abcdef0123456',
'digest_type': 2,
'digest': 'abcdef0123456',
},
),
# diff algorithm
# diff digest_type
(
{
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': 'abcdef0123456',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'abcdef0123456',
},
{
'flags': 0,
'protocol': 1,
'algorithm': 3,
'public_key': 'abcdef0123456',
'key_tag': 0,
'algorithm': 1,
'digest_type': 3,
'digest': 'abcdef0123456',
},
),
# diff digest
(
{
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'abcdef0123456',
},
{
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'bcdef0123456a',
},
),
# diff public_key
# diff digest with previously used key names
(
{
'flags': 0,
@ -66,10 +81,10 @@ class TestRecordDs(TestCase):
'public_key': 'abcdef0123456',
},
{
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': 'bcdef0123456a',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': 'bcdef0123456a',
},
),
):
@ -104,73 +119,113 @@ class TestRecordDs(TestCase):
# things ints, will parse
self.assertEqual(
{
'flags': 'one',
'protocol': 'two',
'algorithm': 'three',
'public_key': 'key',
'key_tag': 'one',
'algorithm': 'two',
'digest_type': 'three',
'digest': 'key',
},
DsValue.parse_rdata_text('one two three key'),
)
# valid
data = {
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': '99148c81',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': '99148c81',
}
self.assertEqual(data, DsValue.parse_rdata_text('0 1 2 99148c81'))
self.assertEqual([], DsValue.validate(data, 'DS'))
# missing flags
data = {'protocol': 1, 'algorithm': 2, 'public_key': '99148c81'}
self.assertEqual(['missing flags'], DsValue.validate(data, 'DS'))
# invalid flags
# missing key_tag
data = {'algorithm': 1, 'digest_type': 2, 'digest': '99148c81'}
self.assertEqual(['missing key_tag'], DsValue.validate(data, 'DS'))
# invalid key_tag
data = {
'flags': 'a',
'protocol': 1,
'algorithm': 2,
'public_key': '99148c81',
'key_tag': 'a',
'algorithm': 1,
'digest_type': 2,
'digest': '99148c81',
}
self.assertEqual(['invalid flags "a"'], DsValue.validate(data, 'DS'))
# missing protocol
data = {'flags': 1, 'algorithm': 2, 'public_key': '99148c81'}
self.assertEqual(['missing protocol'], DsValue.validate(data, 'DS'))
# invalid protocol
data = {
'flags': 1,
'protocol': 'a',
'algorithm': 2,
'public_key': '99148c81',
}
self.assertEqual(['invalid protocol "a"'], DsValue.validate(data, 'DS'))
self.assertEqual(['invalid key_tag "a"'], DsValue.validate(data, 'DS'))
# missing algorithm
data = {'flags': 1, 'protocol': 2, 'public_key': '99148c81'}
data = {'key_tag': 1, 'digest_type': 2, 'digest': '99148c81'}
self.assertEqual(['missing algorithm'], DsValue.validate(data, 'DS'))
# invalid algorithm
data = {
'flags': 1,
'protocol': 2,
'key_tag': 1,
'algorithm': 'a',
'public_key': '99148c81',
'digest_type': 2,
'digest': '99148c81',
}
self.assertEqual(
['invalid algorithm "a"'], DsValue.validate(data, 'DS')
)
# missing digest_type
data = {'key_tag': 1, 'algorithm': 2, 'digest': '99148c81'}
self.assertEqual(['missing digest_type'], DsValue.validate(data, 'DS'))
# invalid digest_type
data = {
'key_tag': 1,
'algorithm': 2,
'digest_type': 'a',
'digest': '99148c81',
}
self.assertEqual(
['invalid digest_type "a"'], DsValue.validate(data, 'DS')
)
# missing public_key (list)
data = {'key_tag': 1, 'algorithm': 2, 'digest_type': 3}
self.assertEqual(['missing digest'], DsValue.validate([data], 'DS'))
# do validations again with old field style
# missing flags (list)
data = {'protocol': 2, 'algorithm': 3, 'public_key': '99148c81'}
self.assertEqual(['missing flags'], DsValue.validate([data], 'DS'))
# missing protocol (list)
data = {'flags': 1, 'algorithm': 3, 'public_key': '99148c81'}
self.assertEqual(['missing protocol'], DsValue.validate([data], 'DS'))
# missing algorithm (list)
data = {'flags': 1, 'protocol': 2, 'algorithm': 3}
data = {'flags': 1, 'protocol': 2, 'public_key': '99148c81'}
self.assertEqual(['missing algorithm'], DsValue.validate([data], 'DS'))
# missing public_key (list)
data = {'flags': 1, 'algorithm': 3, 'protocol': 2}
self.assertEqual(['missing public_key'], DsValue.validate([data], 'DS'))
# missing public_key (list)
data = {'flags': 1, 'algorithm': 3, 'protocol': 2, 'digest': '99148c81'}
self.assertEqual(['missing public_key'], DsValue.validate([data], 'DS'))
# invalid flags, protocol and algorithm
data = {
'flags': 'a',
'protocol': 'a',
'algorithm': 'a',
'public_key': '99148c81',
}
self.assertEqual(
[
'invalid flags "a"',
'invalid protocol "a"',
'invalid algorithm "a"',
],
DsValue.validate(data, 'DS'),
)
zone = Zone('unit.tests.', [])
values = [
{
'flags': 0,
'protocol': 1,
'algorithm': 2,
'public_key': '99148c81',
'key_tag': 0,
'algorithm': 1,
'digest_type': 2,
'digest': '99148c81',
},
{
'flags': 1,
@ -180,26 +235,26 @@ class TestRecordDs(TestCase):
},
]
a = DsRecord(zone, 'ds', {'ttl': 32, 'values': values})
self.assertEqual(0, a.values[0].flags)
a.values[0].flags += 1
self.assertEqual(1, a.values[0].flags)
self.assertEqual(0, a.values[0].key_tag)
a.values[0].key_tag += 1
self.assertEqual(1, a.values[0].key_tag)
self.assertEqual(1, a.values[0].protocol)
a.values[0].protocol += 1
self.assertEqual(2, a.values[0].protocol)
self.assertEqual(2, a.values[0].algorithm)
self.assertEqual(1, a.values[0].algorithm)
a.values[0].algorithm += 1
self.assertEqual(3, a.values[0].algorithm)
self.assertEqual(2, a.values[0].algorithm)
self.assertEqual(2, a.values[0].digest_type)
a.values[0].digest_type += 1
self.assertEqual(3, a.values[0].digest_type)
self.assertEqual('99148c81', a.values[0].public_key)
a.values[0].public_key = '99148c42'
self.assertEqual('99148c42', a.values[0].public_key)
self.assertEqual('99148c81', a.values[0].digest)
a.values[0].digest = '99148c42'
self.assertEqual('99148c42', a.values[0].digest)
self.assertEqual(1, a.values[1].flags)
self.assertEqual(2, a.values[1].protocol)
self.assertEqual(3, a.values[1].algorithm)
self.assertEqual('99148c44', a.values[1].public_key)
self.assertEqual(1, a.values[1].key_tag)
self.assertEqual(2, a.values[1].algorithm)
self.assertEqual(3, a.values[1].digest_type)
self.assertEqual('99148c44', a.values[1].digest)
self.assertEqual(DsValue(values[1]), a.values[1].data)
self.assertEqual('1 2 3 99148c44', a.values[1].rdata_text)


+ 43
- 0
tests/test_octodns_record_dynamic.py View File

@ -1561,3 +1561,46 @@ class TestRecordDynamic(TestCase):
]
),
)
def test_dynamic_subnet_mixed_versions(self):
    """Mixing IPv4 and IPv6 subnets in dynamic rules passes validation."""
    rule_sets = (
        # both address families inside a single rule's subnet list
        [
            {'subnets': ['10.1.0.0/16', '1::/66'], 'pool': 'one'},
            {'pool': 'two'},
        ],
        # one address family per rule
        [
            {'subnets': ['10.1.0.0/16'], 'pool': 'one'},
            {'subnets': ['1::/66'], 'pool': 'two'},
            {'pool': 'two'},
        ],
    )
    for rules in rule_sets:
        # should not raise a validation error
        Record.new(
            self.zone,
            'good',
            {
                'dynamic': {
                    'pools': {
                        'one': {'values': [{'value': '1.1.1.1'}]},
                        'two': {'values': [{'value': '2.2.2.2'}]},
                    },
                    'rules': rules,
                },
                'ttl': 60,
                'type': 'A',
                'values': ['2.2.2.2'],
            },
        )

+ 21
- 1
tests/test_octodns_record_loc.py View File

@ -160,6 +160,26 @@ class TestRecordLoc(TestCase):
LocValue.parse_rdata_text(s),
)
# quoted
s = '0 1 2.2 "N" 3 4 5.5 "E" "6.6m" "7.7m" "8.8m" "9.9m"'
self.assertEqual(
{
'altitude': 6.6,
'lat_degrees': 0,
'lat_direction': 'N',
'lat_minutes': 1,
'lat_seconds': 2.2,
'long_degrees': 3,
'long_direction': 'E',
'long_minutes': 4,
'long_seconds': 5.5,
'precision_horz': 8.8,
'precision_vert': 9.9,
'size': 7.7,
},
LocValue.parse_rdata_text(s),
)
# make sure that the cstor is using parse_rdata_text
zone = Zone('unit.tests.', [])
a = LocRecord(
@ -196,7 +216,7 @@ class TestRecordLoc(TestCase):
self.assertEqual(7.7, a.values[0].size)
self.assertEqual(8.8, a.values[0].precision_horz)
self.assertEqual(9.9, a.values[0].precision_vert)
self.assertEqual(s, a.values[0].rdata_text)
self.assertEqual(s.replace('"', ''), a.values[0].rdata_text)
def test_loc_value(self):
a = LocValue(


+ 6
- 0
tests/test_octodns_record_mx.py View File

@ -92,6 +92,12 @@ class TestRecordMx(TestCase):
MxValue.parse_rdata_text('10 mx.unit.tests.'),
)
# quoted
self.assertEqual(
{'preference': 10, 'exchange': 'mx.unit.tests.'},
MxValue.parse_rdata_text('10 "mx.unit.tests."'),
)
zone = Zone('unit.tests.', [])
a = MxRecord(
zone,


+ 13
- 0
tests/test_octodns_record_naptr.py View File

@ -346,6 +346,19 @@ class TestRecordNaptr(TestCase):
NaptrValue.parse_rdata_text('1 2 three four five six'),
)
# string fields are unquoted if needed
self.assertEqual(
{
'order': 1,
'preference': 2,
'flags': 'three',
'service': 'four',
'regexp': 'five',
'replacement': 'six',
},
NaptrValue.parse_rdata_text('1 2 "three" "four" "five" "six"'),
)
# make sure that the cstor is using parse_rdata_text
zone = Zone('unit.tests.', [])
a = NaptrRecord(


+ 11
- 0
tests/test_octodns_record_srv.py View File

@ -123,6 +123,17 @@ class TestRecordSrv(TestCase):
SrvValue.parse_rdata_text('1 2 3 srv.unit.tests.'),
)
# quoted
self.assertEqual(
{
'priority': 1,
'weight': 2,
'port': 3,
'target': 'srv.unit.tests.',
},
SrvValue.parse_rdata_text('1 2 3 "srv.unit.tests."'),
)
zone = Zone('unit.tests.', [])
a = SrvRecord(
zone,


+ 6
- 0
tests/test_octodns_record_sshfp.py View File

@ -113,6 +113,12 @@ class TestRecordSshfp(TestCase):
SshfpValue.parse_rdata_text('1 2 00479b27'),
)
# valid
self.assertEqual(
{'algorithm': 1, 'fingerprint_type': 2, 'fingerprint': '00479b27'},
SshfpValue.parse_rdata_text('1 2 "00479b27"'),
)
zone = Zone('unit.tests.', [])
a = SshfpRecord(
zone,


+ 11
- 0
tests/test_octodns_record_tlsa.py View File

@ -160,6 +160,17 @@ class TestRecordTlsa(TestCase):
TlsaValue.parse_rdata_text('1 2 3 abcd'),
)
# valid
self.assertEqual(
{
'certificate_usage': 1,
'selector': 2,
'matching_type': 3,
'certificate_association_data': 'abcd',
},
TlsaValue.parse_rdata_text('1 2 3 "abcd"'),
)
zone = Zone('unit.tests.', [])
a = TlsaRecord(
zone,


+ 36
- 0
tests/test_octodns_record_txt.py View File

@ -142,3 +142,39 @@ class TestRecordTxt(TestCase):
self.assertEqual(single.values, chunked.values)
# should be chunked values, with quoting
self.assertEqual(single.chunked_values, chunked.chunked_values)
def test_rr(self):
    """TXT rrs values are double-quoted and long values are chunked."""
    zone = Zone('unit.tests.', [])

    # short values are simply wrapped in double quotes
    short = Record.new(
        zone,
        'txt',
        {'ttl': 42, 'type': 'TXT', 'values': ['short 1', 'short 2']},
    )
    self.assertEqual(
        ('txt.unit.tests.', 42, 'TXT', ['"short 1"', '"short 2"']),
        short.rrs,
    )

    # a value longer than the chunk size is split into space-separated
    # quoted chunks; the shorter values around it are untouched
    dkim = 'v=DKIM1\\; h=sha256\\; k=rsa\\; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx78E7PtJvr8vpoNgHdIAe+llFKoy8WuTXDd6Z5mm3D4AUva9MBt5fFetxg/kcRy3KMDnMw6kDybwbpS/oPw1ylk6DL1xit7Cr5xeYYSWKukxXURAlHwT2K72oUsFKRUvN1X9lVysAeo+H8H/22Z9fJ0P30sOuRIRqCaiz+OiUYicxy4xrpfH2s9a+o3yRwX3zhlp8GjRmmmyK5mf7CkQTCfjnKVsYtB7mabXXmClH9tlcymnBMoN9PeXxaS5JRRysVV8RBCC9/wmfp9y//cck8nvE/MavFpSUHvv+TfTTdVKDlsXPjKX8iZQv0nO3xhspgkqFquKjydiR8nf4meHhwIDAQAB'
    chunked = Record.new(
        zone,
        'txt',
        {'ttl': 42, 'type': 'TXT', 'values': ['before', dkim, 'z after']},
    )
    expected = [
        '"before"',
        '"v=DKIM1\\; h=sha256\\; k=rsa\\; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx78E7PtJvr8vpoNgHdIAe+llFKoy8WuTXDd6Z5mm3D4AUva9MBt5fFetxg/kcRy3KMDnMw6kDybwbpS/oPw1ylk6DL1xit7Cr5xeYYSWKukxXURAlHwT2K72oUsFKRUvN1X9lVysAeo+H8H/22Z9fJ0P30sOuRIRqCaiz+OiUYicxy4xrpfH" '
        '"2s9a+o3yRwX3zhlp8GjRmmmyK5mf7CkQTCfjnKVsYtB7mabXXmClH9tlcymnBMoN9PeXxaS5JRRysVV8RBCC9/wmfp9y//cck8nvE/MavFpSUHvv+TfTTdVKDlsXPjKX8iZQv0nO3xhspgkqFquKjydiR8nf4meHhwIDAQAB"',
        '"z after"',
    ]
    self.assertEqual(('txt.unit.tests.', 42, 'TXT', expected), chunked.rrs)

+ 24
- 0
tests/test_octodns_yaml.py View File

@ -62,3 +62,27 @@ class TestYaml(TestCase):
buf = StringIO()
safe_dump({'45a03129': 42, '45a0392a': 43}, buf)
self.assertEqual("---\n45a0392a: 43\n45a03129: 42\n", buf.getvalue())
def test_include(self):
    """!include pulls in other yaml files; a missing target surfaces the OS error."""
    expected = {
        'included-array': [14, 15, 16, 72],
        'included-dict': {'k': 'v', 'z': 42},
        'included-empty': None,
        'included-nested': 'Hello World!',
        'included-subdir': 'Hello World!',
        'key': 'value',
        'name': 'main',
    }
    with open('tests/config/include/main.yaml') as fd:
        self.assertEqual(expected, safe_load(fd))

    # an include pointing at a file that doesn't exist raises
    # FileNotFoundError naming the missing path
    with open('tests/config/include/include-doesnt-exist.yaml') as fd:
        with self.assertRaises(FileNotFoundError) as ctx:
            safe_load(fd)
        self.assertEqual(
            "[Errno 2] No such file or directory: 'tests/config/include/does-not-exist.yaml'",
            str(ctx.exception),
        )

Loading…
Cancel
Save