diff --git a/CHANGELOG.md b/CHANGELOG.md index 55a88c2..ca42a68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,14 @@ * NS1 NA target now includes `SX` and `UM`. If `NA` continent is in use in dynamic records care must be taken to upgrade/downgrade to v0.9.13. +* Ns1Provider now supports a new parameter, shared_notifylist, which results in + all dynamic record monitors using a shared notify list named 'octoDNS NS1 + Notify List'. Only newly created record values will use the shared notify + list. It should be safe to enable this functionality, but existing records + will not be converted. Note: Once this option is enabled downgrades to + previous versions of octoDNS are discouraged and may result in undefined + behavior and broken records. See https://github.com/octodns/octodns/pull/749 + for related discussion. ## v0.9.13 - 2021-07-18 - Processors Alpha @@ -17,7 +25,7 @@ * Fixes NS1 provider's geotarget limitation of using `NA` continent. Now, when `NA` is used in geos it considers **all** the countries of `North America` insted of just `us-east`, `us-west` and `us-central` regions -* `SX' & 'UM` country support added to NS1Provider, not yet in the North +* `SX' & 'UM` country support added to NS1Provider, not yet in the North America list for backwards compatibility reasons. They will be added in the next releaser. 
diff --git a/octodns/provider/ns1.py b/octodns/provider/ns1.py index c114199..5488d60 100644 --- a/octodns/provider/ns1.py +++ b/octodns/provider/ns1.py @@ -77,8 +77,10 @@ class Ns1Client(object): self._datafeed = client.datafeed() self._datasource_id = None + self._feeds_for_monitors = None self._monitors_cache = None + self._notifylists_cache = None @property def datasource_id(self): @@ -121,6 +123,14 @@ class Ns1Client(object): {m['id']: m for m in self.monitors_list()} return self._monitors_cache + @property + def notifylists(self): + if self._notifylists_cache is None: + self.log.debug('notifylists: fetching & building') + self._notifylists_cache = \ + {l['name']: l for l in self.notifylists_list()} + return self._notifylists_cache + def datafeed_create(self, sourceid, name, config): ret = self._try(self._datafeed.create, sourceid, name, config) self.feeds_for_monitors[config['jobid']] = ret['id'] @@ -163,10 +173,17 @@ class Ns1Client(object): return ret def notifylists_delete(self, nlid): + for name, nl in self.notifylists.items(): + if nl['id'] == nlid: + del self._notifylists_cache[name] + break return self._try(self._notifylists.delete, nlid) def notifylists_create(self, **body): - return self._try(self._notifylists.create, body) + nl = self._try(self._notifylists.create, body) + # cache it + self.notifylists[nl['name']] = nl + return nl def notifylists_list(self): return self._try(self._notifylists.list) @@ -216,6 +233,13 @@ class Ns1Provider(BaseProvider): # Only required if using dynamic records monitor_regions: - lga + # Optional. Default: false. true is Recommended, but not the default + # for backwards compatibility reasons. If true, all NS1 monitors will + # use a shared notify list rather than one per record & value + # combination. See CHANGELOG, + # https://github.com/octodns/octodns/blob/master/CHANGELOG.md, for more + # information before enabling this behavior. + shared_notifylist: false # Optional. Default: None. 
If set, back off in advance to avoid 429s # from rate-limiting. Generally this should be set to the number # of processes or workers hitting the API, e.g. the value of @@ -237,6 +261,7 @@ class Ns1Provider(BaseProvider): 'NS', 'PTR', 'SPF', 'SRV', 'TXT', 'URLFWD')) ZONE_NOT_FOUND_MESSAGE = 'server error: zone not found' + SHARED_NOTIFYLIST_NAME = 'octoDNS NS1 Notify List' def _update_filter(self, filter, with_disabled): if with_disabled: @@ -368,7 +393,8 @@ class Ns1Provider(BaseProvider): } def __init__(self, id, api_key, retry_count=4, monitor_regions=None, - parallelism=None, client_config=None, *args, **kwargs): + parallelism=None, client_config=None, shared_notifylist=False, + *args, **kwargs): self.log = getLogger('Ns1Provider[{}]'.format(id)) self.log.debug('__init__: id=%s, api_key=***, retry_count=%d, ' 'monitor_regions=%s, parallelism=%s, client_config=%s', @@ -376,6 +402,7 @@ class Ns1Provider(BaseProvider): client_config) super(Ns1Provider, self).__init__(id, *args, **kwargs) self.monitor_regions = monitor_regions + self.shared_notifylist = shared_notifylist self._client = Ns1Client(api_key, parallelism, retry_count, client_config) @@ -888,7 +915,6 @@ class Ns1Provider(BaseProvider): def _feed_create(self, monitor): monitor_id = monitor['id'] self.log.debug('_feed_create: monitor=%s', monitor_id) - # TODO: looks like length limit is 64 char name = '{} - {}'.format(monitor['name'], self._uuid()[:6]) # Create the data feed @@ -902,22 +928,36 @@ class Ns1Provider(BaseProvider): return feed_id + def _notifylists_find_or_create(self, name): + self.log.debug('_notifylists_find_or_create: name="%s"', name) + try: + nl = self._client.notifylists[name] + self.log.debug('_notifylists_find_or_create: existing=%s', + nl['id']) + except KeyError: + notify_list = [{ + 'config': { + 'sourceid': self._client.datasource_id, + }, + 'type': 'datafeed', + }] + nl = self._client.notifylists_create(name=name, + notify_list=notify_list) + 
self.log.debug('_notifylists_find_or_create: created=%s', + nl['id']) + + return nl + def _monitor_create(self, monitor): self.log.debug('_monitor_create: monitor="%s"', monitor['name']) - # Create the notify list - notify_list = [{ - 'config': { - 'sourceid': self._client.datasource_id, - }, - 'type': 'datafeed', - }] - nl = self._client.notifylists_create(name=monitor['name'], - notify_list=notify_list) - nl_id = nl['id'] - self.log.debug('_monitor_create: notify_list=%s', nl_id) + + # Find the right notifylist + nl_name = self.SHARED_NOTIFYLIST_NAME \ + if self.shared_notifylist else monitor['name'] + nl = self._notifylists_find_or_create(nl_name) # Create the monitor - monitor['notify_list'] = nl_id + monitor['notify_list'] = nl['id'] monitor = self._client.monitors_create(**monitor) monitor_id = monitor['id'] self.log.debug('_monitor_create: monitor=%s', monitor_id) @@ -1028,7 +1068,13 @@ class Ns1Provider(BaseProvider): self._client.monitors_delete(monitor_id) notify_list_id = monitor['notify_list'] - self._client.notifylists_delete(notify_list_id) + for nl_name, nl in self._client.notifylists.items(): + if nl['id'] == notify_list_id: + # We've found the one that might need deleting + if nl['name'] != self.SHARED_NOTIFYLIST_NAME: + # It's not shared so is safe to delete + self._client.notifylists_delete(notify_list_id) + break def _add_answers_for_pool(self, answers, default_answers, pool_name, pool_label, pool_answers, pools, priority): diff --git a/tests/test_octodns_provider_ns1.py b/tests/test_octodns_provider_ns1.py index 875ebbf..1f69465 100644 --- a/tests/test_octodns_provider_ns1.py +++ b/tests/test_octodns_provider_ns1.py @@ -768,12 +768,70 @@ class TestNs1ProviderDynamic(TestCase): monitor = { 'name': 'test monitor', } + provider._client._notifylists_cache = {} monitor_id, feed_id = provider._monitor_create(monitor) self.assertEquals('mon-id', monitor_id) self.assertEquals('feed-id', feed_id) monitors_create_mock.assert_has_calls([call(name='test 
monitor', notify_list='nl-id')]) + @patch('octodns.provider.ns1.Ns1Provider._feed_create') + @patch('octodns.provider.ns1.Ns1Client.monitors_create') + @patch('octodns.provider.ns1.Ns1Client._try') + def test_monitor_create_shared_notifylist(self, try_mock, + monitors_create_mock, + feed_create_mock): + provider = Ns1Provider('test', 'api-key', shared_notifylist=True) + + # pre-fill caches to avoid extraneous calls (things we're testing + # elsewhere) + provider._client._datasource_id = 'foo' + provider._client._feeds_for_monitors = {} + + # First time we'll need to create the shared list + provider._client._notifylists_cache = {} + try_mock.reset_mock() + monitors_create_mock.reset_mock() + feed_create_mock.reset_mock() + try_mock.side_effect = [{ + 'id': 'nl-id', + 'name': provider.SHARED_NOTIFYLIST_NAME, + }] + monitors_create_mock.side_effect = [{ + 'id': 'mon-id', + }] + feed_create_mock.side_effect = ['feed-id'] + monitor = { + 'name': 'test monitor', + } + monitor_id, feed_id = provider._monitor_create(monitor) + self.assertEquals('mon-id', monitor_id) + self.assertEquals('feed-id', feed_id) + monitors_create_mock.assert_has_calls([call(name='test monitor', + notify_list='nl-id')]) + try_mock.assert_called_once() + # The shared notifylist should be cached now + self.assertEquals([provider.SHARED_NOTIFYLIST_NAME], + list(provider._client._notifylists_cache.keys())) + + # Second time we'll use the cached version + try_mock.reset_mock() + monitors_create_mock.reset_mock() + feed_create_mock.reset_mock() + monitors_create_mock.side_effect = [{ + 'id': 'mon-id', + }] + feed_create_mock.side_effect = ['feed-id'] + monitor = { + 'name': 'test monitor', + } + monitor_id, feed_id = provider._monitor_create(monitor) + self.assertEquals('mon-id', monitor_id) + self.assertEquals('feed-id', feed_id) + monitors_create_mock.assert_has_calls([call(name='test monitor', + notify_list='nl-id')]) + try_mock.assert_not_called() + def test_monitor_gen(self): provider = 
Ns1Provider('test', 'api-key') @@ -986,6 +1044,12 @@ class TestNs1ProviderDynamic(TestCase): 'notify_list': 'nl-id', } }] + provider._client._notifylists_cache = { + 'not shared': { + 'id': 'nl-id', + 'name': 'not shared', + } + } provider._monitors_gc(record) monitors_for_mock.assert_has_calls([call(record)]) datafeed_delete_mock.assert_has_calls([call('foo', 'feed-id')]) @@ -1025,12 +1089,75 @@ class TestNs1ProviderDynamic(TestCase): 'notify_list': 'nl-id2', }, }] + provider._client._notifylists_cache = { + 'not shared': { + 'id': 'nl-id', + 'name': 'not shared', + }, + 'not shared 2': { + 'id': 'nl-id2', + 'name': 'not shared 2', + } + } provider._monitors_gc(record, {'mon-id'}) monitors_for_mock.assert_has_calls([call(record)]) datafeed_delete_mock.assert_not_called() monitors_delete_mock.assert_has_calls([call('mon-id2')]) notifylists_delete_mock.assert_has_calls([call('nl-id2')]) + # Non-active monitor w/o a notifylist, generally shouldn't happen, but + # code should handle it just in case someone gets clicky in the UI + monitors_for_mock.reset_mock() + datafeed_delete_mock.reset_mock() + monitors_delete_mock.reset_mock() + notifylists_delete_mock.reset_mock() + monitors_for_mock.side_effect = [{ + 'y': { + 'id': 'mon-id2', + 'notify_list': 'nl-id2', + }, + }] + provider._client._notifylists_cache = { + 'not shared a': { + 'id': 'nl-ida', + 'name': 'not shared a', + }, + 'not shared b': { + 'id': 'nl-idb', + 'name': 'not shared b', + } + } + provider._monitors_gc(record, {'mon-id'}) + monitors_for_mock.assert_has_calls([call(record)]) + datafeed_delete_mock.assert_not_called() + monitors_delete_mock.assert_has_calls([call('mon-id2')]) + notifylists_delete_mock.assert_not_called() + + # Non-active monitor with a shared notifylist, monitor deleted, but + # notifylist is left alone + provider.shared_notifylist = True + monitors_for_mock.reset_mock() + datafeed_delete_mock.reset_mock() + monitors_delete_mock.reset_mock() + notifylists_delete_mock.reset_mock() + 
monitors_for_mock.side_effect = [{ + 'y': { + 'id': 'mon-id2', + 'notify_list': 'shared', + }, + }] + provider._client._notifylists_cache = { + 'shared': { + 'id': 'shared', + 'name': provider.SHARED_NOTIFYLIST_NAME, + }, + } + provider._monitors_gc(record, {'mon-id'}) + monitors_for_mock.assert_has_calls([call(record)]) + datafeed_delete_mock.assert_not_called() + monitors_delete_mock.assert_has_calls([call('mon-id2')]) + notifylists_delete_mock.assert_not_called() + @patch('octodns.provider.ns1.Ns1Provider._monitor_sync') @patch('octodns.provider.ns1.Ns1Provider._monitors_for') def test_params_for_dynamic_region_only(self, monitors_for_mock, @@ -2325,17 +2452,22 @@ class TestNs1Client(TestCase): notifylists_list_mock.reset_mock() notifylists_create_mock.reset_mock() notifylists_delete_mock.reset_mock() - notifylists_create_mock.side_effect = ['bar'] + notifylists_list_mock.side_effect = [{}] + expected = { + 'id': 'nl-id', + 'name': 'bar', + } + notifylists_create_mock.side_effect = [expected] notify_list = [{ 'config': { 'sourceid': 'foo', }, 'type': 'datafeed', }] - nl = client.notifylists_create(name='some name', - notify_list=notify_list) - self.assertEquals('bar', nl) - notifylists_list_mock.assert_not_called() + got = client.notifylists_create(name='some name', + notify_list=notify_list) + self.assertEquals(expected, got) + notifylists_list_mock.assert_called_once() notifylists_create_mock.assert_has_calls([ call({'name': 'some name', 'notify_list': notify_list}) ]) @@ -2349,6 +2481,29 @@ class TestNs1Client(TestCase): notifylists_create_mock.assert_not_called() notifylists_delete_mock.assert_has_calls([call('nlid')]) + # Delete again, this time with a cache item that needs cleaned out and + # another that needs to be ignored + notifylists_list_mock.reset_mock() + notifylists_create_mock.reset_mock() + notifylists_delete_mock.reset_mock() + client._notifylists_cache = { + 'another': { + 'id': 'notid', + 'name': 'another', + }, + # This one comes 2nd on 
purpose + 'the-one': { + 'id': 'nlid', + 'name': 'the-one', + }, + } + client.notifylists_delete('nlid') + notifylists_list_mock.assert_not_called() + notifylists_create_mock.assert_not_called() + notifylists_delete_mock.assert_has_calls([call('nlid')]) + # Only another left + self.assertEquals(['another'], list(client._notifylists_cache.keys())) + notifylists_list_mock.reset_mock() notifylists_create_mock.reset_mock() notifylists_delete_mock.reset_mock()