Browse Source

Merge pull request #780 from octodns/pool-value-up

Force enable/disable pool values
pull/786/head
Ross McFarland 4 years ago
committed by GitHub
parent
commit
5c881e3bc6
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 338 additions and 30 deletions
  1. +25
    -0
      docs/dynamic_records.md
  2. +48
    -2
      octodns/provider/azuredns.py
  3. +29
    -8
      octodns/provider/base.py
  4. +6
    -1
      octodns/provider/constellix.py
  5. +24
    -11
      octodns/provider/ns1.py
  6. +1
    -0
      octodns/provider/yaml.py
  7. +9
    -0
      octodns/record/__init__.py
  8. +1
    -0
      octodns/source/base.py
  9. +80
    -0
      tests/test_octodns_provider_azuredns.py
  10. +13
    -0
      tests/test_octodns_provider_base.py
  11. +3
    -0
      tests/test_octodns_provider_dyn.py
  12. +50
    -2
      tests/test_octodns_provider_ns1.py
  13. +6
    -6
      tests/test_octodns_provider_route53.py
  14. +43
    -0
      tests/test_octodns_record.py

+ 25
- 0
docs/dynamic_records.md View File

@ -105,6 +105,31 @@ test:
| port | port to check | 443 |
| protocol | HTTP/HTTPS/TCP | HTTPS |
Healthchecks can also be skipped for individual pool values. These values can be forced to always-serve or never-serve using the `status` flag.
The `status` flag is optional and accepts one of three possible values, `up`/`down`/`obey`, with `obey` being the default:
```yaml
test:
...
dynamic:
pools:
na:
values:
- value: 1.2.3.4
status: down
- value: 2.3.4.5
status: up
- value: 3.4.5.6
# defaults to status: obey
...
```
Support matrix:
* NS1 supports all 3 flag values
* Azure DNS supports only `obey` and `down`
* All other dynamic-capable providers only support the default `obey`
#### Route53 Health Check Options
| Key | Description | Default |


+ 48
- 2
octodns/provider/azuredns.py View File

@ -375,8 +375,11 @@ def _profile_is_match(have, desired):
desired_endpoints = desired.endpoints
endpoints = zip(have_endpoints, desired_endpoints)
for have_endpoint, desired_endpoint in endpoints:
have_status = have_endpoint.endpoint_status or 'Enabled'
desired_status = desired_endpoint.endpoint_status or 'Enabled'
if have_endpoint.name != desired_endpoint.name or \
have_endpoint.type != desired_endpoint.type:
have_endpoint.type != desired_endpoint.type or \
have_status != desired_status:
return false(have_endpoint, desired_endpoint, have.name)
target_type = have_endpoint.type.split('/')[-1]
if target_type == 'externalEndpoints':
@ -457,6 +460,7 @@ class AzureProvider(BaseProvider):
'''
SUPPORTS_GEO = False
SUPPORTS_DYNAMIC = True
SUPPORTS_POOL_VALUE_STATUS = True
SUPPORTS_MULTIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SRV',
'TXT'))
@ -807,9 +811,14 @@ class AzureProvider(BaseProvider):
defaults.add(val)
ep_name = ep_name[:-len('--default--')]
status = 'obey'
if pool_ep.endpoint_status == 'Disabled':
status = 'down'
values.append({
'value': val,
'weight': pool_ep.weight or 1,
'status': status,
})
return values
@ -898,6 +907,36 @@ class AzureProvider(BaseProvider):
return data
def _process_desired_zone(self, desired):
# check for status=up values
for record in desired.records:
if not getattr(record, 'dynamic', False):
continue
up_pools = []
for name, pool in record.dynamic.pools.items():
for value in pool.data['values']:
if value['status'] == 'up':
# Azure only supports obey and down, not up
up_pools.append(name)
if not up_pools:
continue
up_pools = ','.join(up_pools)
msg = f'status=up is not supported for pools {up_pools} in ' \
f'{record.fqdn}'
fallback = 'will ignore it and respect the healthcheck'
self.supports_warn_or_except(msg, fallback)
record = record.copy()
for pool in record.dynamic.pools.values():
for value in pool.data['values']:
if value['status'] == 'up':
value['status'] = 'obey'
desired.add_record(record, replace=True)
return super()._process_desired_zone(desired)
def _extra_changes(self, existing, desired, changes):
changed = set(c.record for c in changes)
@ -1039,10 +1078,13 @@ class AzureProvider(BaseProvider):
# mark default
ep_name += '--default--'
default_seen = True
ep_status = 'Disabled' if val['status'] == 'down' else \
'Enabled'
endpoints.append(Endpoint(
name=ep_name,
target=target,
weight=val.get('weight', 1),
endpoint_status=ep_status,
))
pool_profile = self._generate_tm_profile(
@ -1075,7 +1117,8 @@ class AzureProvider(BaseProvider):
else:
# Skip Weighted profile hop for single-value pool; append its
# value as an external endpoint to fallback rule profile
target = pool_values[0]['value']
value = pool_values[0]
target = value['value']
if record._type == 'CNAME':
target = target[:-1]
ep_name = pool_name
@ -1083,10 +1126,13 @@ class AzureProvider(BaseProvider):
# mark default
ep_name += '--default--'
default_seen = True
ep_status = 'Disabled' if value['status'] == 'down' else \
'Enabled'
return Endpoint(
name=ep_name,
target=target,
priority=priority,
endpoint_status=ep_status,
), default_seen
def _make_rule_profile(self, rule_endpoints, rule_name, record, geos,


+ 29
- 8
octodns/provider/base.py View File

@ -55,14 +55,35 @@ class BaseProvider(BaseSource):
fallback = 'omitting record'
self.supports_warn_or_except(msg, fallback)
desired.remove_record(record)
elif getattr(record, 'dynamic', False) and \
not self.SUPPORTS_DYNAMIC:
msg = f'dynamic records not supported for {record.fqdn}'
fallback = 'falling back to simple record'
self.supports_warn_or_except(msg, fallback)
record = record.copy()
record.dynamic = None
desired.add_record(record, replace=True)
elif getattr(record, 'dynamic', False):
if self.SUPPORTS_DYNAMIC:
if self.SUPPORTS_POOL_VALUE_STATUS:
continue
# drop unsupported up flag
unsupported_pools = []
for _id, pool in record.dynamic.pools.items():
for value in pool.data['values']:
if value['status'] != 'obey':
unsupported_pools.append(_id)
if not unsupported_pools:
continue
unsupported_pools = ','.join(unsupported_pools)
msg = f'"status" flag used in pools {unsupported_pools}' \
f' in {record.fqdn} is not supported'
fallback = 'will ignore it and respect the healthcheck'
self.supports_warn_or_except(msg, fallback)
record = record.copy()
for pool in record.dynamic.pools.values():
for value in pool.data['values']:
value['status'] = 'obey'
desired.add_record(record, replace=True)
else:
msg = f'dynamic records not supported for {record.fqdn}'
fallback = 'falling back to simple record'
self.supports_warn_or_except(msg, fallback)
record = record.copy()
record.dynamic = None
desired.add_record(record, replace=True)
elif record._type == 'PTR' and len(record.values) > 1 and \
not self.SUPPORTS_MULTIVALUE_PTR:
# replace with a single-value copy


+ 6
- 1
octodns/provider/constellix.py View File

@ -618,7 +618,12 @@ class ConstellixProvider(BaseProvider):
for i, rule in enumerate(record.dynamic.rules):
pool_name = rule.data.get('pool')
pool = record.dynamic.pools.get(pool_name)
values = pool.data.get('values')
values = [
{
'value': value['value'],
'weight': value['weight'],
} for value in pool.data.get('values', [])
]
# Make a pool name based on zone, record, type and name
generated_pool_name = \


+ 24
- 11
octodns/provider/ns1.py View File

@ -304,6 +304,7 @@ class Ns1Provider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS_POOL_VALUE_STATUS = True
SUPPORTS_MULTIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'MX', 'NAPTR',
'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD'))
@ -589,6 +590,9 @@ class Ns1Provider(BaseProvider):
'value': value,
'weight': int(meta.get('weight', 1)),
}
if isinstance(meta['up'], bool):
value_dict['status'] = 'up' if meta['up'] else 'down'
if value_dict not in pool['values']:
# If we haven't seen this value before add it to the pool
pool['values'].append(value_dict)
@ -1139,6 +1143,10 @@ class Ns1Provider(BaseProvider):
pool = pools[current_pool_name]
for answer in pool_answers[current_pool_name]:
fallback = pool.data['fallback']
if answer['feed_id']:
up = {'feed': answer['feed_id']}
else:
up = answer['status'] == 'up'
answer = {
'answer': answer['answer'],
'meta': {
@ -1148,9 +1156,7 @@ class Ns1Provider(BaseProvider):
'pool': current_pool_name,
'fallback': fallback or '',
}),
'up': {
'feed': answer['feed_id'],
},
'up': up,
'weight': answer['weight'],
},
'region': pool_label, # the one we're answering
@ -1271,20 +1277,27 @@ class Ns1Provider(BaseProvider):
for pool_name, pool in sorted(pools.items()):
for value in pool.data['values']:
weight = value['weight']
status = value['status']
value = value['value']
feed_id = value_feed.get(value)
# check for identical monitor and skip creating one if found
if not feed_id:
existing = existing_monitors.get(value)
monitor_id, feed_id = self._monitor_sync(record, value,
existing)
value_feed[value] = feed_id
active_monitors.add(monitor_id)
feed_id = None
if status == 'obey':
# state is not forced, let's find a monitor
feed_id = value_feed.get(value)
# check for identical monitor and skip creating one if
# found
if not feed_id:
existing = existing_monitors.get(value)
monitor_id, feed_id = self._monitor_sync(record, value,
existing)
value_feed[value] = feed_id
active_monitors.add(monitor_id)
pool_answers[pool_name].append({
'answer': [value],
'weight': weight,
'feed_id': feed_id,
'status': status,
})
if record._type == 'CNAME':


+ 1
- 0
octodns/provider/yaml.py View File

@ -104,6 +104,7 @@ class YamlProvider(BaseProvider):
'''
SUPPORTS_GEO = True
SUPPORTS_DYNAMIC = True
SUPPORTS_POOL_VALUE_STATUS = True
SUPPORTS_MULTIVALUE_PTR = True
SUPPORTS = set(('A', 'AAAA', 'ALIAS', 'CAA', 'CNAME', 'DNAME', 'LOC', 'MX',
'NAPTR', 'NS', 'PTR', 'SSHFP', 'SPF', 'SRV', 'TXT',


+ 9
- 0
octodns/record/__init__.py View File

@ -418,6 +418,7 @@ class _DynamicPool(object):
{
'value': d['value'],
'weight': d.get('weight', 1),
'status': d.get('status', 'obey'),
} for d in data['values']
]
values.sort(key=lambda d: d['value'])
@ -565,6 +566,14 @@ class _DynamicMixin(object):
reasons.append(f'invalid weight "{weight}" in '
f'pool "{_id}" value {value_num}')
try:
status = value['status']
if status not in ['up', 'down', 'obey']:
reasons.append(f'invalid status "{status}" in '
f'pool "{_id}" value {value_num}')
except KeyError:
pass
try:
value = value['value']
reasons.extend(cls._value_type.validate(value,


+ 1
- 0
octodns/source/base.py View File

@ -9,6 +9,7 @@ from __future__ import absolute_import, division, print_function, \
class BaseSource(object):
SUPPORTS_MULTIVALUE_PTR = False
SUPPORTS_POOL_VALUE_STATUS = False
def __init__(self, id):
self.id = id


+ 80
- 0
tests/test_octodns_provider_azuredns.py View File

@ -473,6 +473,7 @@ class Test_ProfileIsMatch(TestCase):
endpoints = 1,
endpoint_name = 'name',
endpoint_type = 'profile/nestedEndpoints',
endpoint_status = None,
target = 'target.unit.tests',
target_id = 'resource/id',
geos = ['GEO-AF'],
@ -490,6 +491,7 @@ class Test_ProfileIsMatch(TestCase):
endpoints=[Endpoint(
name=endpoint_name,
type=endpoint_type,
endpoint_status=endpoint_status,
target=target,
target_resource_id=target_id,
geo_mapping=geos,
@ -506,6 +508,9 @@ class Test_ProfileIsMatch(TestCase):
self.assertFalse(is_match(profile(), profile(monitor_proto='HTTP')))
self.assertFalse(is_match(profile(), profile(endpoint_name='a')))
self.assertFalse(is_match(profile(), profile(endpoint_type='b')))
self.assertFalse(
is_match(profile(), profile(endpoint_status='Disabled'))
)
self.assertFalse(
is_match(profile(endpoint_type='b'), profile(endpoint_type='b'))
)
@ -1717,6 +1722,81 @@ class TestAzureDnsProvider(TestCase):
changes = provider._extra_changes(zone, desired, [])
self.assertEqual(len(changes), 0)
def test_dynamic_pool_status(self):
# test that traffic managers are generated as expected for pool value
# statuses
provider = self._get_provider()
zone1 = Zone('unit.tests.', [])
record1 = Record.new(zone1, 'foo', data={
'type': 'CNAME',
'ttl': 60,
'value': 'default.unit.tests.',
'dynamic': {
'pools': {
'one': {
'values': [
{'value': 'one1.unit.tests.', 'status': 'up'},
],
},
'two': {
'values': [
{'value': 'two1.unit.tests.', 'status': 'down'},
{'value': 'two2.unit.tests.'},
],
},
},
'rules': [
{'geos': ['AS'], 'pool': 'one'},
{'pool': 'two'},
],
}
})
zone1.add_record(record1)
zone2 = provider._process_desired_zone(zone1.copy())
record2 = list(zone2.records)[0]
self.assertTrue(
record2.dynamic.pools['one'].data['values'][0]['status'],
'obey'
)
record1.dynamic.pools['one'].data['values'][0]['status'] = 'down'
profiles = provider._generate_traffic_managers(record1)
self.assertEqual(len(profiles), 4)
self.assertEqual(profiles[0].endpoints[0].endpoint_status, 'Disabled')
self.assertEqual(profiles[1].endpoints[0].endpoint_status, 'Disabled')
# test that same record gets populated back from traffic managers
tm_list = provider._tm_client.profiles.list_by_resource_group
tm_list.return_value = profiles
azrecord = RecordSet(
ttl=60,
target_resource=SubResource(id=profiles[-1].id),
)
azrecord.name = record1.name or '@'
azrecord.type = f'Microsoft.Network/dnszones/{record1._type}'
record2 = provider._populate_record(zone, azrecord)
self.assertEqual(record1.dynamic._data(), record2.dynamic._data())
# _process_desired_zone shouldn't change anything when status value is
# supported
zone1 = Zone(zone.name, sub_zones=[])
zone1.add_record(record1)
zone2 = provider._process_desired_zone(zone1.copy())
record2 = list(zone2.records)[0]
self.assertTrue(record1.data, record2.data)
# simple records should not get changed by _process_desired_zone
zone1 = Zone(zone.name, sub_zones=[])
record1 = Record.new(zone1, 'foo', data={
'type': 'CNAME',
'ttl': 86400,
'value': 'one.unit.tests.',
})
zone1.add_record(record1)
zone2 = provider._process_desired_zone(zone1.copy())
record2 = list(zone2.records)[0]
self.assertTrue(record1.data, record2.data)
def test_dynamic_A(self):
provider = self._get_provider()
external = 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'


+ 13
- 0
tests/test_octodns_provider_base.py View File

@ -293,6 +293,19 @@ class TestBaseProvider(TestCase):
record2 = list(zone2.records)[0]
self.assertTrue(record2.dynamic)
# SUPPORTS_POOL_VALUE_STATUS
provider.SUPPORTS_POOL_VALUE_STATUS = False
zone1 = Zone('unit.tests.', [])
record1.dynamic.pools['one'].data['values'][0]['status'] = 'up'
zone1.add_record(record1)
zone2 = provider._process_desired_zone(zone1.copy())
record2 = list(zone2.records)[0]
self.assertEqual(
record2.dynamic.pools['one'].data['values'][0]['status'],
'obey'
)
def test_safe_none(self):
# No changes is safe
Plan(None, None, [], True).raise_if_unsafe()


+ 3
- 0
tests/test_octodns_provider_dyn.py View File

@ -1921,12 +1921,15 @@ class TestDynProviderDynamic(TestCase):
'values': [{
'value': '1.2.3.5',
'weight': 1,
'status': 'obey',
}, {
'value': '1.2.3.6',
'weight': 1,
'status': 'obey',
}, {
'value': '1.2.3.7',
'weight': 1,
'status': 'obey',
}]
}, record.dynamic.pools['pool1'].data)
self.assertEquals(2, len(record.dynamic.rules))


+ 50
- 2
tests/test_octodns_provider_ns1.py View File

@ -1209,6 +1209,40 @@ class TestNs1ProviderDynamic(TestCase):
monitors_delete_mock.assert_has_calls([call('mon-id2')])
notifylists_delete_mock.assert_not_called()
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_with_pool_status(self, monitors_for_mock):
provider = Ns1Provider('test', 'api-key')
monitors_for_mock.reset_mock()
monitors_for_mock.return_value = {}
record = Record.new(self.zone, '', {
'dynamic': {
'pools': {
'iad': {
'values': [{
'value': '1.2.3.4',
'status': 'up',
}],
},
},
'rules': [{
'pool': 'iad',
}],
},
'ttl': 32,
'type': 'A',
'value': '1.2.3.4',
'meta': {},
})
params, active_monitors = provider._params_for_dynamic(record)
self.assertEqual(params['answers'][0]['meta']['up'], True)
self.assertEqual(len(active_monitors), 0)
# check for down also
record.dynamic.pools['iad'].data['values'][0]['status'] = 'down'
params, active_monitors = provider._params_for_dynamic(record)
self.assertEqual(params['answers'][0]['meta']['up'], False)
self.assertEqual(len(active_monitors), 0)
@patch('octodns.provider.ns1.Ns1Provider._monitor_sync')
@patch('octodns.provider.ns1.Ns1Provider._monitors_for')
def test_params_for_dynamic_region_only(self, monitors_for_mock,
@ -1614,6 +1648,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 1,
'note': 'from:lhr__country',
'up': {},
},
'region': 'lhr',
}, {
@ -1622,6 +1657,7 @@ class TestNs1ProviderDynamic(TestCase):
'priority': 2,
'weight': 12,
'note': 'from:iad',
'up': {},
},
'region': 'lhr',
}, {
@ -1637,6 +1673,7 @@ class TestNs1ProviderDynamic(TestCase):
'priority': 1,
'weight': 12,
'note': 'from:iad',
'up': {},
},
'region': 'iad',
}, {
@ -1652,6 +1689,7 @@ class TestNs1ProviderDynamic(TestCase):
'priority': 1,
'weight': 12,
'note': f'from:{catchall_pool_name}',
'up': {},
},
'region': catchall_pool_name,
}, {
@ -1798,6 +1836,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 1,
'note': 'from:one__country pool:one fallback:two',
'up': True,
},
'region': 'one_country',
}, {
@ -1805,6 +1844,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 2,
'note': 'from:one__country pool:two fallback:three',
'up': {},
},
'region': 'one_country',
}, {
@ -1812,6 +1852,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 3,
'note': 'from:one__country pool:three fallback:',
'up': False,
},
'region': 'one_country',
}, {
@ -1826,6 +1867,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 1,
'note': 'from:four__country pool:four fallback:',
'up': {},
},
'region': 'four_country',
}, {
@ -1872,11 +1914,15 @@ class TestNs1ProviderDynamic(TestCase):
},
'one': {
'fallback': 'two',
'values': [{'value': '1.1.1.1', 'weight': 1}]
'values': [
{'value': '1.1.1.1', 'weight': 1, 'status': 'up'},
],
},
'three': {
'fallback': None,
'values': [{'value': '3.3.3.3', 'weight': 1}]
'values': [
{'value': '3.3.3.3', 'weight': 1, 'status': 'down'}
]
},
'two': {
'fallback': 'three',
@ -1916,6 +1962,7 @@ class TestNs1ProviderDynamic(TestCase):
'priority': 1,
'weight': 12,
'note': f'from:{catchall_pool_name}',
'up': {},
},
'region': catchall_pool_name,
}, {
@ -1923,6 +1970,7 @@ class TestNs1ProviderDynamic(TestCase):
'meta': {
'priority': 2,
'note': 'from:--default--',
'up': {},
},
'region': catchall_pool_name,
}],


+ 6
- 6
tests/test_octodns_provider_route53.py View File

@ -2591,25 +2591,25 @@ class TestRoute53Provider(TestCase):
'ap-southeast-1': {
'fallback': 'us-east-1',
'values': [{
'weight': 2, 'value': '1.4.1.1'
'weight': 2, 'value': '1.4.1.1', 'status': 'obey',
}, {
'weight': 2, 'value': '1.4.1.2'
'weight': 2, 'value': '1.4.1.2', 'status': 'obey',
}]
},
'eu-central-1': {
'fallback': 'us-east-1',
'values': [{
'weight': 1, 'value': '1.3.1.1'
'weight': 1, 'value': '1.3.1.1', 'status': 'obey',
}, {
'weight': 1, 'value': '1.3.1.2'
'weight': 1, 'value': '1.3.1.2', 'status': 'obey',
}],
},
'us-east-1': {
'fallback': None,
'values': [{
'weight': 1, 'value': '1.5.1.1'
'weight': 1, 'value': '1.5.1.1', 'status': 'obey',
}, {
'weight': 1, 'value': '1.5.1.2'
'weight': 1, 'value': '1.5.1.2', 'status': 'obey',
}],
}
}, {k: v.data for k, v in record.dynamic.pools.items()})


+ 43
- 0
tests/test_octodns_record.py View File

@ -3423,20 +3423,25 @@ class TestDynamicRecords(TestCase):
self.assertEquals({
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 1,
'status': 'obey',
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '4.4.4.4',
'status': 'obey',
}, {
'weight': 12,
'value': '5.5.5.5',
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
@ -3526,20 +3531,25 @@ class TestDynamicRecords(TestCase):
self.assertEquals({
'value': '2601:642:500:e210:62f8:1dff:feb8:9473',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals([{
'value': '2601:642:500:e210:62f8:1dff:feb8:9474',
'weight': 1,
'status': 'obey',
}, {
'value': '2601:642:500:e210:62f8:1dff:feb8:9475',
'weight': 1,
'status': 'obey',
}], pools['two'].data['values'])
self.assertEquals([{
'weight': 10,
'value': '2601:642:500:e210:62f8:1dff:feb8:9476',
'status': 'obey',
}, {
'weight': 12,
'value': '2601:642:500:e210:62f8:1dff:feb8:9477',
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
@ -3596,17 +3606,21 @@ class TestDynamicRecords(TestCase):
self.assertEquals({
'value': 'one.cname.target.',
'weight': 1,
'status': 'obey',
}, pools['one'].data['values'][0])
self.assertEquals({
'value': 'two.cname.target.',
'weight': 1,
'status': 'obey',
}, pools['two'].data['values'][0])
self.assertEquals([{
'value': 'three-1.cname.target.',
'weight': 12,
'status': 'obey',
}, {
'value': 'three-2.cname.target.',
'weight': 32,
'status': 'obey',
}], pools['three'].data['values'])
rules = dynamic.rules
@ -4529,6 +4543,29 @@ class TestDynamicRecords(TestCase):
# This should be valid, no exception
Record.new(self.zone, 'bad', a_data)
# invalid status
a_data = {
'dynamic': {
'pools': {
'one': {
'values': [{
'value': '2.2.2.2',
'status': 'none',
}],
},
},
'rules': [{
'pool': 'one',
}],
},
'ttl': 60,
'type': 'A',
'values': ['1.1.1.1'],
}
with self.assertRaises(ValidationError) as ctx:
Record.new(self.zone, 'bad', a_data)
self.assertIn('invalid status', ctx.exception.reasons[0])
def test_dynamic_lenient(self):
# Missing pools
a_data = {
@ -4587,6 +4624,7 @@ class TestDynamicRecords(TestCase):
'values': [{
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}]
},
'two': {
@ -4594,9 +4632,11 @@ class TestDynamicRecords(TestCase):
'values': [{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 2,
'status': 'obey',
}]
},
},
@ -4642,6 +4682,7 @@ class TestDynamicRecords(TestCase):
'values': [{
'value': '3.3.3.3',
'weight': 1,
'status': 'obey',
}]
},
'two': {
@ -4649,9 +4690,11 @@ class TestDynamicRecords(TestCase):
'values': [{
'value': '4.4.4.4',
'weight': 1,
'status': 'obey',
}, {
'value': '5.5.5.5',
'weight': 2,
'status': 'obey',
}]
},
},


Loading…
Cancel
Save