Rename all references from Domain to Zone

This is a pretty invasive change :)

An incomplete list of changes:

* Database Tables
* Database Columns
* Designate Objects and fields on Objects
* Designate Object Adapters stop doing some of the renames
* All RPCAPI versions are bumped - totally backward
  incompatible (function names have changed; see the caller-side
  sketch below)
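
To illustrate the caller-side impact of the backward-incompatible rename,
here is a minimal sketch (not part of this commit; it assumes a running
Designate deployment and an admin RequestContext named `context`, and the
example.com values are placeholders):

    from designate.central import rpcapi as central_rpcapi
    from designate import objects

    # Obtain the singleton RPC client for Central
    central_api = central_rpcapi.CentralAPI.get_instance()

    # Before this commit (RPC API <= 5.6):
    #   zone = central_api.create_domain(
    #       context, objects.Domain(name='example.com.',
    #                               email='admin@example.com'))
    #   ns = central_api.get_domain_servers(context, zone['id'])

    # After this commit (RPC API 6.0):
    zone = central_api.create_zone(
        context, objects.Zone(name='example.com.',
                              email='admin@example.com'))
    ns = central_api.get_zone_ns_records(context, zone['id'])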

Change-Id: Ib99e918998a3909fa4aa92bf1ee0475f8a519196
Graham Hayes 2015-11-10 19:42:29 +00:00
parent 3e9ec422ab
commit c5949ccb28
140 changed files with 4444 additions and 4230 deletions

View File

@ -213,11 +213,11 @@ class IPABackend(base.Backend):
self.ntries = cfg.CONF[self.name].ipa_connect_retries
self.force = cfg.CONF[self.name].ipa_force_ns_use
def create_domain(self, context, domain):
LOG.debug('Create Domain %r' % domain)
def create_zone(self, context, zone):
LOG.debug('Create Zone %r' % zone)
ipareq = {'method': 'dnszone_add', 'id': 0}
params = [domain['name']]
servers = self.central_service.get_domain_servers(self.admin_context)
params = [zone['name']]
servers = self.central_service.get_zone_ns_records(self.admin_context)
# just use the first one for zone creation - add the others
# later, below - use force because designate assumes the NS
# already exists somewhere, is resolvable, and already has
@ -226,35 +226,35 @@ class IPABackend(base.Backend):
if self.force:
args['force'] = True
for dkey, ipakey in list(domain2ipa.items()):
if dkey in domain:
args[ipakey] = domain[dkey]
if dkey in zone:
args[ipakey] = zone[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
# add NS records for all of the other servers
if len(servers) > 1:
ipareq = {'method': 'dnsrecord_add', 'id': 0}
params = [domain['name'], "@"]
params = [zone['name'], "@"]
args = {'nsrecord': servers[1:]}
if self.force:
args['force'] = True
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def update_domain(self, context, domain):
LOG.debug('Update Domain %r' % domain)
def update_zone(self, context, zone):
LOG.debug('Update Zone %r' % zone)
ipareq = {'method': 'dnszone_mod', 'id': 0}
params = [domain['name']]
params = [zone['name']]
args = {}
for dkey, ipakey in list(domain2ipa.items()):
if dkey in domain:
args[ipakey] = domain[dkey]
if dkey in zone:
args[ipakey] = zone[dkey]
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain %r' % domain)
def delete_zone(self, context, zone):
LOG.debug('Delete Zone %r' % zone)
ipareq = {'method': 'dnszone_del', 'id': 0}
params = [domain['name']]
params = [zone['name']]
args = {}
ipareq['params'] = [params, args]
self._call_and_handle_error(ipareq)

View File

@ -97,54 +97,54 @@ class MultiBackend(base.Backend):
with excutils.save_and_reraise_exception():
self.slave.create_tsigkey(context, tsigkey)
def create_domain(self, context, domain):
self.master.create_domain(context, domain)
def create_zone(self, context, zone):
self.master.create_zone(context, zone)
try:
self.slave.create_domain(context, domain)
self.slave.create_zone(context, zone)
except Exception:
with excutils.save_and_reraise_exception():
self.master.delete_domain(context, domain)
self.master.delete_zone(context, zone)
def update_domain(self, context, domain):
self.master.update_domain(context, domain)
def update_zone(self, context, zone):
self.master.update_zone(context, zone)
def delete_domain(self, context, domain):
# Fetch the full domain from Central first, as we may
def delete_zone(self, context, zone):
# Fetch the full zone from Central first, as we may
# have to recreate it on slave if delete on master fails
deleted_context = context.deepcopy()
deleted_context.show_deleted = True
full_domain = self.central.find_domain(
deleted_context, {'id': domain['id']})
full_domain = self.central.find_zone(
deleted_context, {'id': zone['id']})
self.slave.delete_domain(context, domain)
self.slave.delete_zone(context, zone)
try:
self.master.delete_domain(context, domain)
self.master.delete_zone(context, zone)
except Exception:
with excutils.save_and_reraise_exception():
self.slave.create_domain(context, domain)
self.slave.create_zone(context, zone)
[self.slave.create_record(context, domain, record)
[self.slave.create_record(context, zone, record)
for record in self.central.find_records(
context, {'domain_id': full_domain['id']})]
def create_recordset(self, context, domain, recordset):
self.master.create_recordset(context, domain, recordset)
def create_recordset(self, context, zone, recordset):
self.master.create_recordset(context, zone, recordset)
def update_recordset(self, context, domain, recordset):
self.master.update_recordset(context, domain, recordset)
def update_recordset(self, context, zone, recordset):
self.master.update_recordset(context, zone, recordset)
def delete_recordset(self, context, domain, recordset):
self.master.delete_recordset(context, domain, recordset)
def delete_recordset(self, context, zone, recordset):
self.master.delete_recordset(context, zone, recordset)
def create_record(self, context, domain, recordset, record):
self.master.create_record(context, domain, recordset, record)
def create_record(self, context, zone, recordset, record):
self.master.create_record(context, zone, recordset, record)
def update_record(self, context, domain, recordset, record):
self.master.update_record(context, domain, recordset, record)
def update_record(self, context, zone, recordset, record):
self.master.update_record(context, zone, recordset, record)
def delete_record(self, context, domain, recordset, record):
self.master.delete_record(context, domain, recordset, record)
def delete_record(self, context, zone, recordset, record):
self.master.delete_record(context, zone, recordset, record)
def ping(self, context):
return {

View File

@ -242,7 +242,7 @@ def main():
# create a fake domain in IPA
# create a fake server in Designate
# try to create the same fake domain in Designate
# if we get a DuplicateDomain error from Designate, then
# if we get a DuplicateZone error from Designate, then
# raise the CannotUseIPABackend error, after deleting
# the fake server and fake domain
# find the first non-reverse zone

View File

@ -305,7 +305,7 @@ ComputeAdminGroup = [
help="API key to use when authenticating as admin.",
secret=True),
cfg.StrOpt('domain_name',
help="Domain name for authentication as admin (Keystone V3)."
help="Zone name for authentication as admin (Keystone V3)."
"The same domain applies to user and project"),
]

View File

@ -39,13 +39,13 @@ class DomainsClientJSON(rest_client.RestClient):
"""Get the details of a domain."""
resp, body = self.get("v1/domains/%s" % str(domain_id))
body = json.loads(body)
self.validate_response(schema.get_domain, resp, body)
self.validate_response(schema.get_zone, resp, body)
return resp, body
def delete_domain(self, domain_id):
"""Delete the given domain."""
resp, body = self.delete("v1/domains/%s" % str(domain_id))
self.validate_response(schema.delete_domain, resp, body)
self.validate_response(schema.delete_zone, resp, body)
return resp, body
def create_domain(self, name, email, **kwargs):
@ -61,7 +61,7 @@ class DomainsClientJSON(rest_client.RestClient):
post_body[post_param] = value
resp, body = self.post('v1/domains', json.dumps(post_body))
body = json.loads(body)
self.validate_response(schema.create_domain, resp, body)
self.validate_response(schema.create_zone, resp, body)
return resp, body
def update_domain(self, domain_id, **kwargs):
@ -75,5 +75,5 @@ class DomainsClientJSON(rest_client.RestClient):
resp, body = self.put('v1/domains/%s' % domain_id,
json.dumps(post_body))
body = json.loads(body)
self.validate_response(schema.update_domain, resp, body)
self.validate_response(schema.update_zone, resp, body)
return resp, body

View File

@ -30,19 +30,19 @@ class DnsDomainsTest(base.BaseDnsTest):
for i in range(2):
name = data_utils.rand_name('domain') + '.com.'
email = data_utils.rand_name('dns') + '@testmail.com'
_, domain = cls.client.create_domain(name, email)
_, domain = cls.client.create_zone(name, email)
cls.setup_domains.append(domain)
@classmethod
def tearDownClass(cls):
for domain in cls.setup_domains:
cls.client.delete_domain(domain['id'])
cls.client.delete_zone(domain['id'])
super(DnsDomainsTest, cls).tearDownClass()
def _delete_domain(self, domain_id):
self.client.delete_domain(domain_id)
self.client.delete_zone(domain_id)
self.assertRaises(exceptions.NotFound,
self.client.get_domain, domain_id)
self.client.get_zone, domain_id)
@test.attr(type='gate')
def test_list_domains(self):
@ -54,20 +54,20 @@ class DnsDomainsTest(base.BaseDnsTest):
@test.attr(type='smoke')
def test_create_update_get_domain(self):
# Create Domain
# Create Zone
d_name = data_utils.rand_name('domain') + '.com.'
d_email = data_utils.rand_name('dns') + '@testmail.com'
_, domain = self.client.create_domain(name=d_name, email=d_email)
_, domain = self.client.create_zone(name=d_name, email=d_email)
self.addCleanup(self._delete_domain, domain['id'])
self.assertEqual(d_name, domain['name'])
self.assertEqual(d_email, domain['email'])
# Update Domain with ttl
# Update Zone with ttl
d_ttl = 3600
_, update_domain = self.client.update_domain(domain['id'],
ttl=d_ttl)
_, update_domain = self.client.update_zone(domain['id'],
ttl=d_ttl)
self.assertEqual(d_ttl, update_domain['ttl'])
# Get the details of Domain
_, get_domain = self.client.get_domain(domain['id'])
# Get the details of Zone
_, get_domain = self.client.get_zone(domain['id'])
self.assertEqual(update_domain['name'], get_domain['name'])
self.assertEqual(update_domain['email'], get_domain['email'])
self.assertEqual(update_domain['ttl'], get_domain['ttl'])

View File

@ -30,7 +30,7 @@ class RecordsTest(base.BaseDnsTest):
cls.setup_records = list()
name = data_utils.rand_name('domain') + '.com.'
email = data_utils.rand_name('dns') + '@testmail.com'
_, cls.domain = cls.dns_domains_client.create_domain(name, email)
_, cls.domain = cls.dns_domains_client.create_zone(name, email)
# Creates a record with type as A
r_name = 'www.' + name
data1 = "192.0.2.3"
@ -49,7 +49,7 @@ class RecordsTest(base.BaseDnsTest):
def tearDownClass(cls):
for record in cls.setup_records:
cls.client.delete_record(cls.domain['id'], record['id'])
cls.dns_domains_client.delete_domain(cls.domain['id'])
cls.dns_domains_client.delete_zone(cls.domain['id'])
def _delete_record(self, domain_id, record_id):
self.client.delete_record(domain_id, record_id)
@ -66,11 +66,11 @@ class RecordsTest(base.BaseDnsTest):
@test.attr(type='smoke')
def test_create_update_get_delete_record(self):
# Create Domain
# Create Zone
name = data_utils.rand_name('domain') + '.com.'
email = data_utils.rand_name('dns') + '@testmail.com'
_, domain = self.dns_domains_client.create_domain(name, email)
self.addCleanup(self.dns_domains_client.delete_domain, domain['id'])
_, domain = self.dns_domains_client.create_zone(name, email)
self.addCleanup(self.dns_domains_client.delete_zone, domain['id'])
# Create Record
r_name = 'www.' + name
r_data = "192.0.2.4"

View File

@ -108,28 +108,28 @@ class RequestHandler(object):
question = request.question[0]
requester = request.environ['addr'][0]
domain_name = question.name.to_text()
zone_name = question.name.to_text()
if not self._allowed(request, requester, "CREATE", domain_name):
if not self._allowed(request, requester, "CREATE", zone_name):
response.set_rcode(dns.rcode.from_text("REFUSED"))
return response
serial = self.backend.find_domain_serial(domain_name)
serial = self.backend.find_zone_serial(zone_name)
if serial is not None:
LOG.warn(_LW("Not creating %(name)s, zone already exists") %
{'name': domain_name})
{'name': zone_name})
# Provide an authoritative answer
response.flags |= dns.flags.AA
return response
LOG.debug("Received %(verb)s for %(name)s from %(host)s" %
{'verb': "CREATE", 'name': domain_name, 'host': requester})
{'verb': "CREATE", 'name': zone_name, 'host': requester})
try:
zone = dnsutils.do_axfr(domain_name, self.masters,
zone = dnsutils.do_axfr(zone_name, self.masters,
source=self.transfer_source)
self.backend.create_domain(zone)
self.backend.create_zone(zone)
except Exception:
response.set_rcode(dns.rcode.from_text("SERVFAIL"))
return response
@ -152,35 +152,35 @@ class RequestHandler(object):
question = request.question[0]
requester = request.environ['addr'][0]
domain_name = question.name.to_text()
zone_name = question.name.to_text()
if not self._allowed(request, requester, "NOTIFY", domain_name):
if not self._allowed(request, requester, "NOTIFY", zone_name):
response.set_rcode(dns.rcode.from_text("REFUSED"))
return response
serial = self.backend.find_domain_serial(domain_name)
serial = self.backend.find_zone_serial(zone_name)
if serial is None:
LOG.warn(_LW("Refusing NOTIFY for %(name)s, doesn't exist") %
{'name': domain_name})
{'name': zone_name})
response.set_rcode(dns.rcode.from_text("REFUSED"))
return response
LOG.debug("Received %(verb)s for %(name)s from %(host)s" %
{'verb': "NOTIFY", 'name': domain_name, 'host': requester})
{'verb': "NOTIFY", 'name': zone_name, 'host': requester})
# According to RFC we should query the server that sent the NOTIFY
# TODO(Tim): Reenable this when it makes more sense
# resolver = dns.resolver.Resolver()
# resolver.nameservers = [requester]
# This assumes that the Master is running on port 53
# soa_answer = resolver.query(domain_name, 'SOA')
# soa_answer = resolver.query(zone_name, 'SOA')
# Check that the serial is < serial above
try:
zone = dnsutils.do_axfr(domain_name, self.masters,
zone = dnsutils.do_axfr(zone_name, self.masters,
source=self.transfer_source)
self.backend.update_domain(zone)
self.backend.update_zone(zone)
except Exception:
response.set_rcode(dns.rcode.from_text("SERVFAIL"))
return response
@ -203,44 +203,44 @@ class RequestHandler(object):
question = request.question[0]
requester = request.environ['addr'][0]
domain_name = question.name.to_text()
zone_name = question.name.to_text()
if not self._allowed(request, requester, "DELETE", domain_name):
if not self._allowed(request, requester, "DELETE", zone_name):
response.set_rcode(dns.rcode.from_text("REFUSED"))
return response
serial = self.backend.find_domain_serial(domain_name)
serial = self.backend.find_zone_serial(zone_name)
if serial is None:
LOG.warn(_LW("Not deleting %(name)s, zone doesn't exist") %
{'name': domain_name})
{'name': zone_name})
# Provide an authoritative answer
response.flags |= dns.flags.AA
return response
LOG.debug("Received DELETE for %(name)s from %(host)s" %
{'name': domain_name, 'host': requester})
{'name': zone_name, 'host': requester})
# Provide an authoritative answer
response.flags |= dns.flags.AA
# Call into the backend to Delete
try:
self.backend.delete_domain(domain_name)
self.backend.delete_zone(zone_name)
except Exception:
response.set_rcode(dns.rcode.from_text("SERVFAIL"))
return response
return response
def _allowed(self, request, requester, op, domain_name):
def _allowed(self, request, requester, op, zone_name):
# If there are no explicit notifiers specified, allow all
if not self.allow_notify:
return True
if requester not in self.allow_notify:
LOG.warn(_LW("%(verb)s for %(name)s from %(server)s refused") %
{'verb': op, 'name': domain_name, 'server': requester})
{'verb': op, 'name': zone_name, 'server': requester})
return False
return True

View File

@ -33,7 +33,7 @@ class BaseView(object):
as part of the API call.
For example, in the V2 API, we did s/domain/zone/. Adapting a record
resources "domain_id" <-> "zone_id" is the responsibility of a View.
resources "tenant_id" <-> "project_id" is the responsibility of a View.
"""
_resource_name = None
_collection_name = None

View File

@ -30,24 +30,15 @@ class QuotasView(base_view.BaseView):
def show_basic(self, context, request, quota):
"""Basic view of a quota"""
return {
"zones": quota['domains'],
"zone_records": quota['domain_records'],
"zone_recordsets": quota['domain_recordsets'],
"zones": quota['zones'],
"zone_records": quota['zone_records'],
"zone_recordsets": quota['zone_recordsets'],
"recordset_records": quota['recordset_records']
}
def load(self, context, request, body):
"""Extract a "central" compatible dict from an API call"""
valid_keys = ('domain_records', 'domain_recordsets', 'domains',
valid_keys = ('zone_records', 'zone_recordsets', 'zones',
'recordset_records')
mapping = {
'zones': 'domains',
'zone_records': 'domain_records',
'zone_recordsets': 'domain_recordsets',
'recordset_records': 'recordset_records'
}
body["quota"] = {mapping[k]: body["quota"][k] for k in body["quota"]}
return self._load(context, request, body, valid_keys)

View File

@ -51,7 +51,7 @@ class TenantsView(base_view.BaseView):
"""Basic view of the Tenants Report"""
return {
"zone_count": tenants['domain_count'],
"zone_count": tenants['zone_count'],
"id": tenants['id'],
"links": self._get_resource_links(request, tenants)
}
@ -60,8 +60,8 @@ class TenantsView(base_view.BaseView):
"""Detail view of the Tenants Report"""
return {
"zones_count": tenant['domain_count'],
"zones": tenant['domains'],
"zones_count": tenant['zone_count'],
"zones": tenant['zones'],
"id": tenant['id'],
"links": self._get_resource_links(request, tenant)
}

View File

@ -65,7 +65,7 @@ def create_domain():
# A V1 zone only supports being a primary (No notion of a type)
values['type'] = 'PRIMARY'
domain = central_api.create_domain(context, objects.Domain(**values))
domain = central_api.create_zone(context, objects.Zone(**values))
response = flask.jsonify(domain_schema.filter(domain))
response.status_int = 201
@ -80,7 +80,7 @@ def get_domains():
central_api = central_rpcapi.CentralAPI.get_instance()
domains = central_api.find_domains(context, criterion={"type": "PRIMARY"})
domains = central_api.find_zones(context, criterion={"type": "PRIMARY"})
return flask.jsonify(domains_schema.filter({'domains': domains}))
@ -92,7 +92,7 @@ def get_domain(domain_id):
central_api = central_rpcapi.CentralAPI.get_instance()
criterion = {"id": domain_id, "type": "PRIMARY"}
domain = central_api.find_domain(context, criterion=criterion)
domain = central_api.find_zone(context, criterion=criterion)
return flask.jsonify(domain_schema.filter(domain))
@ -106,7 +106,7 @@ def update_domain(domain_id):
# Fetch the existing resource
criterion = {"id": domain_id, "type": "PRIMARY"}
domain = central_api.find_domain(context, criterion=criterion)
domain = central_api.find_zone(context, criterion=criterion)
# Prepare a dict of fields for validation
domain_data = domain_schema.filter(domain)
@ -117,7 +117,7 @@ def update_domain(domain_id):
# Update and persist the resource
domain.update(values)
domain = central_api.update_domain(context, domain)
domain = central_api.update_zone(context, domain)
return flask.jsonify(domain_schema.filter(domain))
@ -130,9 +130,9 @@ def delete_domain(domain_id):
# TODO(ekarlso): Fix this to something better.
criterion = {"id": domain_id, "type": "PRIMARY"}
central_api.find_domain(context, criterion=criterion)
central_api.find_zone(context, criterion=criterion)
central_api.delete_domain(context, domain_id)
central_api.delete_zone(context, domain_id)
return flask.Response(status=200)
@ -145,9 +145,9 @@ def get_domain_servers(domain_id):
# TODO(ekarlso): Fix this to something better.
criterion = {"id": domain_id, "type": "PRIMARY"}
central_api.find_domain(context, criterion=criterion)
central_api.find_zone(context, criterion=criterion)
nameservers = central_api.get_domain_servers(context, domain_id)
nameservers = central_api.get_zone_ns_records(context, domain_id)
servers = objects.ServerList()

View File

@ -47,7 +47,7 @@ def reports_counts():
context = flask.request.environ.get('context')
tenants = central_api.count_tenants(context)
domains = central_api.count_domains(context)
domains = central_api.count_zones(context)
records = central_api.count_records(context)
return flask.jsonify(tenants=tenants, domains=domains, records=records)
@ -66,7 +66,7 @@ def reports_counts_tenants():
def reports_counts_domains():
context = flask.request.environ.get('context')
count = central_api.count_domains(context)
count = central_api.count_zones(context)
return flask.jsonify(domains=count)

View File

@ -28,7 +28,7 @@ blueprint = flask.Blueprint('sync', __name__)
def sync_domains():
context = flask.request.environ.get('context')
central_api.sync_domains(context)
central_api.sync_zones(context)
return flask.Response(status=200)
@ -37,7 +37,7 @@ def sync_domains():
def sync_domain(domain_id):
context = flask.request.environ.get('context')
central_api.sync_domain(context, domain_id)
central_api.sync_zone(context, domain_id)
return flask.Response(status=200)

View File

@ -26,6 +26,6 @@ blueprint = flask.Blueprint('touch', __name__)
def touch_domain(domain_id):
context = flask.request.environ.get('context')
central_api.touch_domain(context, domain_id)
central_api.touch_zone(context, domain_id)
return flask.Response(status=200)

View File

@ -41,8 +41,8 @@ def get_limits():
return flask.jsonify(limits_schema.filter({
"limits": {
"absolute": {
"maxDomains": absolute_limits['domains'],
"maxDomainRecords": absolute_limits['domain_records']
"maxDomains": absolute_limits['zones'],
"maxDomainRecords": absolute_limits['zone_records']
}
}
}))

View File

@ -33,7 +33,7 @@ def _find_recordset(context, domain_id, name, type):
central_api = central_rpcapi.CentralAPI.get_instance()
return central_api.find_recordset(context, {
'domain_id': domain_id,
'zone_id': domain_id,
'name': name,
'type': type,
})
@ -43,7 +43,7 @@ def _find_or_create_recordset(context, domain_id, name, type, ttl):
central_api = central_rpcapi.CentralAPI.get_instance()
criterion = {"id": domain_id, "type": "PRIMARY"}
central_api.find_domain(context, criterion=criterion)
central_api.find_zone(context, criterion=criterion)
try:
# Attempt to create an empty recordset
@ -83,6 +83,10 @@ def _format_record_v1(record, recordset):
record['priority'], record['data'] = utils.extract_priority_from_data(
recordset.type, record)
record['domain_id'] = record['zone_id']
del record['zone_id']
record.update({
'name': recordset['name'],
'type': recordset['type'],
@ -122,8 +126,8 @@ def create_record(domain_id):
central_api = central_rpcapi.CentralAPI.get_instance()
record = central_api.create_record(context, domain_id,
recordset['id'],
record)
recordset['id'],
record)
record = _format_record_v1(record, recordset)
@ -143,9 +147,9 @@ def get_records(domain_id):
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return an empty records array instead of a domain not found
central_api.get_domain(context, domain_id)
central_api.get_zone(context, domain_id)
recordsets = central_api.find_recordsets(context, {'domain_id': domain_id})
recordsets = central_api.find_recordsets(context, {'zone_id': domain_id})
records = []
@ -164,9 +168,9 @@ def get_record(domain_id, record_id):
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
central_api.get_domain(context, domain_id)
central_api.get_zone(context, domain_id)
criterion = {'domain_id': domain_id, 'id': record_id}
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
recordset = central_api.get_recordset(
@ -188,12 +192,12 @@ def update_record(domain_id, record_id):
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
criterion = {"id": domain_id, "type": "PRIMARY"}
central_api.find_domain(context, criterion)
central_api.find_zone(context, criterion)
# Fetch the existing resource
# NOTE(kiall): We use "find_record" rather than "get_record" as we do not
# have the recordset_id.
criterion = {'domain_id': domain_id, 'id': record_id}
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
# TODO(graham): Move this further down the stack
@ -247,10 +251,10 @@ def delete_record(domain_id, record_id):
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return a record not found instead of a domain not found
criterion = {"id": domain_id, "type": "PRIMARY"}
central_api.find_domain(context, criterion=criterion)
central_api.find_zone(context, criterion=criterion)
# Find the record
criterion = {'domain_id': domain_id, 'id': record_id}
criterion = {'zone_id': domain_id, 'id': record_id}
record = central_api.find_record(context, criterion)
central_api.delete_record(

View File

@ -35,15 +35,15 @@ class LimitsController(rest.RestController):
return {
# Resource Creation Limits
"max_zones": absolute_limits['domains'],
"max_zone_recordsets": absolute_limits['domain_recordsets'],
"max_zone_records": absolute_limits['domain_records'],
"max_zones": absolute_limits['zones'],
"max_zone_recordsets": absolute_limits['zone_recordsets'],
"max_zone_records": absolute_limits['zone_records'],
"max_recordset_records": absolute_limits['recordset_records'],
# Resource Field Value Limits
"min_ttl": CONF['service:central'].min_ttl,
"max_zone_name_length":
CONF['service:central'].max_domain_name_len,
CONF['service:central'].max_zone_name_len,
"max_recordset_name_length":
CONF['service:central'].max_recordset_name_len,

View File

@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
class RecordSetsController(rest.RestController):
SORT_KEYS = ['created_at', 'id', 'updated_at', 'domain_id', 'tenant_id',
SORT_KEYS = ['created_at', 'id', 'updated_at', 'zone_id', 'tenant_id',
'name', 'type', 'ttl', 'records']
@pecan.expose(template='json:', content_type='application/json')
@ -50,9 +50,9 @@ class RecordSetsController(rest.RestController):
request = pecan.request
context = request.environ['context']
# NOTE: We need to ensure the domain actually exists, otherwise we may
# return deleted recordsets instead of a domain not found
self.central_api.get_domain(context, zone_id)
# NOTE: We need to ensure the zone actually exists, otherwise we may
# return deleted recordsets instead of a zone not found
self.central_api.get_zone(context, zone_id)
# Extract the pagination params
marker, limit, sort_key, sort_dir = utils.get_paging_params(
@ -64,7 +64,7 @@ class RecordSetsController(rest.RestController):
criterion = self._apply_filter_params(
params, accepted_filters, {})
criterion['domain_id'] = zone_id
criterion['zone_id'] = zone_id
# Data must be filtered separately, through the Records table
data = criterion.pop('data', None)
@ -77,7 +77,7 @@ class RecordSetsController(rest.RestController):
# 'data' filter param: only return recordsets with matching data
if data:
records = self.central_api.find_records(
context, criterion={'data': data, 'domain_id': zone_id})
context, criterion={'data': data, 'zone_id': zone_id})
recordset_with_data_ids = set(record.recordset_id
for record in records)
@ -163,7 +163,7 @@ class RecordSetsController(rest.RestController):
# NS recordsets at the zone root cannot be manually updated
if recordset['type'] == 'NS':
zone = self.central_api.get_domain(context, zone_id)
zone = self.central_api.get_zone(context, zone_id)
if recordset['name'] == zone['name']:
raise exceptions.BadRequest(
'Updating a root zone NS record is not allowed')

View File

@ -49,7 +49,7 @@ class ZonesController(rest.RestController):
return DesignateAdapter.render(
'API_v2',
self.central_api.get_domain(context, zone_id),
self.central_api.get_zone(context, zone_id),
request=request)
@pecan.expose(template='json:', content_type='application/json')
@ -70,7 +70,7 @@ class ZonesController(rest.RestController):
return DesignateAdapter.render(
'API_v2',
self.central_api.find_domains(
self.central_api.find_zones(
context, criterion, marker, limit, sort_key, sort_dir),
request=request)
@ -90,7 +90,7 @@ class ZonesController(rest.RestController):
if 'type' not in zone:
zone['type'] = 'PRIMARY'
zone = DesignateAdapter.parse('API_v2', zone, objects.Domain())
zone = DesignateAdapter.parse('API_v2', zone, objects.Zone())
zone.validate()
if zone.type == 'SECONDARY':
@ -98,7 +98,7 @@ class ZonesController(rest.RestController):
zone['email'] = mgmt_email
# Create the zone
zone = self.central_api.create_domain(context, zone)
zone = self.central_api.create_zone(context, zone)
# Prepare the response headers
# If the zone has been created asynchronously
@ -129,7 +129,7 @@ class ZonesController(rest.RestController):
# TODO(kiall): Validate we have a sane UUID for zone_id
# Fetch the existing zone
zone = self.central_api.get_domain(context, zone_id)
zone = self.central_api.get_zone(context, zone_id)
# Don't allow updates to zones that are being deleted
if zone.action == "DELETE":
@ -166,7 +166,7 @@ class ZonesController(rest.RestController):
raise exceptions.InvalidObject(msg)
increment_serial = zone.type == 'PRIMARY'
zone = self.central_api.update_domain(
zone = self.central_api.update_zone(
context, zone, increment_serial=increment_serial)
if zone.status == 'PENDING':
@ -184,7 +184,7 @@ class ZonesController(rest.RestController):
response = pecan.response
context = request.environ['context']
zone = self.central_api.delete_domain(context, zone_id)
zone = self.central_api.delete_zone(context, zone_id)
response.status_int = 202
return DesignateAdapter.render('API_v2', zone, request=request)

View File

@ -37,5 +37,5 @@ class NameServersController(rest.RestController):
return {
"nameservers": DesignateAdapter.render(
'API_v2',
self.central_api.get_domain_servers(context, zone_id),
self.central_api.get_zone_ns_records(context, zone_id),
request=request)}

View File

@ -30,7 +30,7 @@ class AbandonController(rest.RestController):
context.abandon = 'True'
# abandon the zone
zone = self.central_api.delete_domain(context, zone_id)
zone = self.central_api.delete_zone(context, zone_id)
if zone.deleted_at:
response.status_int = 204
else:

View File

@ -40,7 +40,7 @@ class ZoneExportController(rest.RestController):
if export.location and export.location.startswith('designate://'):
return self.zone_manager_api.\
render_zone(context, export['domain_id'])
render_zone(context, export['zone_id'])
else:
msg = 'Zone can not be exported synchronously'
raise exceptions.BadRequest(msg)

View File

@ -29,7 +29,7 @@ class XfrController(rest.RestController):
response = pecan.response
context = request.environ['context']
self.central_api.xfr_domain(context, zone_id)
self.central_api.xfr_zone(context, zone_id)
response.status_int = 202
# NOTE: This is a hack and a half.. But Pecan needs it.

View File

@ -67,10 +67,10 @@ class AgentPoolBackend(base.Backend):
def mdns_api(self):
return mdns_api.MdnsAPI.get_instance()
def create_domain(self, context, domain):
LOG.debug('Create Domain')
def create_zone(self, context, zone):
LOG.debug('Create Zone')
response, retry = self._make_and_send_dns_message(
domain.name,
zone.name,
self.timeout,
CC,
CREATE,
@ -81,12 +81,12 @@ class AgentPoolBackend(base.Backend):
if response is None:
raise exceptions.Backend()
def update_domain(self, context, domain):
LOG.debug('Update Domain')
def update_zone(self, context, zone):
LOG.debug('Update Zone')
self.mdns_api.notify_zone_changed(
context,
domain,
zone,
self.host,
self.port,
self.timeout,
@ -95,10 +95,10 @@ class AgentPoolBackend(base.Backend):
self.delay
)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain')
def delete_zone(self, context, zone):
LOG.debug('Delete Zone')
response, retry = self._make_and_send_dns_message(
domain.name,
zone.name,
self.timeout,
CC,
DELETE,
@ -109,10 +109,10 @@ class AgentPoolBackend(base.Backend):
if response is None:
raise exceptions.Backend()
def _make_and_send_dns_message(self, domain_name, timeout, opcode,
rdatatype, rdclass, dest_ip,
dest_port):
dns_message = self._make_dns_message(domain_name, opcode, rdatatype,
def _make_and_send_dns_message(self, zone_name, timeout, opcode,
rdatatype, rdclass, dest_ip,
dest_port):
dns_message = self._make_dns_message(zone_name, opcode, rdatatype,
rdclass)
retry = 0
@ -121,7 +121,7 @@ class AgentPoolBackend(base.Backend):
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'zone': zone_name, 'server': dest_ip,
'port': dest_port})
response = self._send_dns_message(
dns_message, dest_ip, dest_port, timeout)
@ -131,7 +131,7 @@ class AgentPoolBackend(base.Backend):
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
@ -140,7 +140,7 @@ class AgentPoolBackend(base.Backend):
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
@ -153,7 +153,7 @@ class AgentPoolBackend(base.Backend):
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'. Response message: %(resp)s") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'zone': zone_name, 'server': dest_ip,
'port': dest_port, 'resp': str(response)})
response = None
return (response, retry)

View File

@ -39,19 +39,19 @@ class AgentBackend(DriverPlugin):
pass
@abc.abstractmethod
def find_domain_serial(self, domain_name):
"""Find a DNS Domain"""
def find_zone_serial(self, zone_name):
"""Find a DNS Zone"""
@abc.abstractmethod
def create_domain(self, domain):
"""Create a DNS domain"""
"""Domain is a DNSPython Zone object"""
def create_zone(self, zone):
"""Create a DNS zone"""
"""Zone is a DNSPython Zone object"""
@abc.abstractmethod
def update_domain(self, domain):
"""Update a DNS domain"""
"""Domain is a DNSPython Zone object"""
def update_zone(self, zone):
"""Update a DNS zone"""
"""Zone is a DNSPython Zone object"""
@abc.abstractmethod
def delete_domain(self, domain_name):
"""Delete a DNS domain"""
def delete_zone(self, zone_name):
"""Delete a DNS zone"""

View File

@ -50,7 +50,7 @@ class Bind9Backend(base.AgentBackend):
cfg.StrOpt('zone-file-path', default='$state_path/zones',
help='Path where zone files are stored'),
cfg.StrOpt('query-destination', default='127.0.0.1',
help='Host to query when finding domains')
help='Host to query when finding zones')
]
return [(group, opts)]
@ -58,30 +58,30 @@ class Bind9Backend(base.AgentBackend):
def start(self):
LOG.info(_LI("Started bind9 backend"))
def find_domain_serial(self, domain_name):
LOG.debug("Finding %s" % domain_name)
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
resolver = dns.resolver.Resolver()
resolver.nameservers = [cfg.CONF[CFG_GROUP].query_destination]
try:
rdata = resolver.query(domain_name, 'SOA')[0]
rdata = resolver.query(zone_name, 'SOA')[0]
except Exception:
return None
return rdata.serial
def create_domain(self, domain):
LOG.debug("Creating %s" % domain.origin.to_text())
self._sync_domain(domain, new_domain_flag=True)
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
self._sync_zone(zone, new_zone_flag=True)
def update_domain(self, domain):
LOG.debug("Updating %s" % domain.origin.to_text())
self._sync_domain(domain)
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin.to_text())
self._sync_zone(zone)
def delete_domain(self, domain_name):
LOG.debug('Delete Domain: %s' % domain_name)
def delete_zone(self, zone_name):
LOG.debug('Delete Zone: %s' % zone_name)
rndc_op = 'delzone'
# RNDC doesn't like the trailing dot on the domain name
rndc_call = self._rndc_base() + [rndc_op, domain_name.rstrip('.')]
# RNDC doesn't like the trailing dot on the zone name
rndc_call = self._rndc_base() + [rndc_op, zone_name.rstrip('.')]
utils.execute(*rndc_call)
@ -102,39 +102,39 @@ class Bind9Backend(base.AgentBackend):
return rndc_call
def _sync_domain(self, domain, new_domain_flag=False):
"""Sync a single domain's zone file and reload bind config"""
def _sync_zone(self, zone, new_zone_flag=False):
"""Sync a single zone's zone file and reload bind config"""
# NOTE: Different versions of BIND9 behave differently with a trailing
# dot, so we're just going to take it off.
domain_name = domain.origin.to_text().rstrip('.')
zone_name = zone.origin.to_text().rstrip('.')
# NOTE: Only one thread should be working with the Zonefile at a given
# time. The sleep(1) below introduces a not insignificant risk
# of more than 1 thread working with a zonefile at a given time.
with lockutils.lock('bind9-%s' % domain_name):
LOG.debug('Synchronising Domain: %s' % domain_name)
with lockutils.lock('bind9-%s' % zone_name):
LOG.debug('Synchronising Zone: %s' % zone_name)
zone_path = cfg.CONF[CFG_GROUP].zone_file_path
output_path = os.path.join(zone_path,
'%s.zone' % domain_name)
'%s.zone' % zone_name)
domain.to_file(output_path, relativize=False)
zone.to_file(output_path, relativize=False)
rndc_call = self._rndc_base()
if new_domain_flag:
if new_zone_flag:
rndc_op = [
'addzone',
'%s { type master; file "%s"; };' % (domain_name,
'%s { type master; file "%s"; };' % (zone_name,
output_path),
]
rndc_call.extend(rndc_op)
else:
rndc_op = 'reload'
rndc_call.extend([rndc_op])
rndc_call.extend([domain_name])
rndc_call.extend([zone_name])
LOG.debug('Calling RNDC with: %s' % " ".join(rndc_call))
self._execute_rndc(rndc_call)

View File

@ -118,14 +118,14 @@ class DenominatorBackend(base.AgentBackend):
def stop(self):
LOG.info(_LI("Stopped Denominator backend"))
def find_domain_serial(self, domain_name):
LOG.debug("Finding %s" % domain_name)
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
domain_name = domain_name.rstrip('.')
zone_name = zone_name.rstrip('.')
output = self.denominator.get_record(
zone=domain_name,
zone=zone_name,
type='SOA',
name=domain_name)
name=zone_name)
try:
text = ' '.join(output.split()[3:])
rdata = dns.rdata.from_text(dns.rdataclass.IN,
@ -135,109 +135,109 @@ class DenominatorBackend(base.AgentBackend):
return None
return rdata.serial
def create_domain(self, domain):
LOG.debug("Creating %s" % domain.origin.to_text())
domain_name = domain.origin.to_text(omit_final_dot=True)
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
zone_name = zone.origin.to_text(omit_final_dot=True)
# Use SOA TTL as zone default TTL
soa_record = domain.find_rrset(domain.origin, dns.rdatatype.SOA)
rname = soa_record.items[0].rname.derelativize(origin=domain.origin)
soa_record = zone.find_rrset(zone.origin, dns.rdatatype.SOA)
rname = soa_record.items[0].rname.derelativize(origin=zone.origin)
# Lock domain to prevent concurrent changes.
with self._sync_domain(domain.origin):
# Lock zone to prevent concurrent changes.
with self._sync_zone(zone.origin):
# NOTE: If zone already exists, denominator will update it with
# new values, otherwise a duplicate zone will be created if
# provider supports such functionality.
self.denominator.create_zone(
name=domain_name,
name=zone_name,
ttl=soa_record.ttl,
email=rname)
# Add records one by one.
for name, ttl, rtype, data in self._iterate_records(domain):
for name, ttl, rtype, data in self._iterate_records(zone):
# Some providers do not support creation of SOA record.
rdatatype = dns.rdatatype.from_text(rtype)
if rdatatype == dns.rdatatype.SOA:
continue
self.denominator.create_record(
zone=domain_name,
zone=zone_name,
name=name,
type=rtype,
ttl=ttl,
data=data)
def update_domain(self, domain):
LOG.debug("Updating %s" % domain.origin)
domain_name = domain.origin.to_text(omit_final_dot=True)
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin)
zone_name = zone.origin.to_text(omit_final_dot=True)
soa_record = domain.find_rrset(domain.origin, dns.rdatatype.SOA)
rname = soa_record.items[0].rname.derelativize(origin=domain.origin)
soa_record = zone.find_rrset(zone.origin, dns.rdatatype.SOA)
rname = soa_record.items[0].rname.derelativize(origin=zone.origin)
with self._sync_domain(domain.origin):
with self._sync_zone(zone.origin):
# Update zone with new parameters
self.denominator.update_zone(
id=domain_name,
id=zone_name,
ttl=soa_record.ttl,
email=rname)
# Fetch records to create a differential update of a zone.
output = self.denominator.get_records(domain_name)
subdomains = dict()
output = self.denominator.get_records(zone_name)
subzones = dict()
# Subdomains dict will contain names of subdomains without
# subzones dict will contain names of subzones without
# trailing dot.
for raw in output.splitlines():
data = raw.split()
name, rtype = data[0], data[1]
rtypes = subdomains.get(name, set())
rtypes = subzones.get(name, set())
rtypes.add(rtype)
subdomains[name] = rtypes
subzones[name] = rtypes
for name, ttl, rtype, data in self._iterate_records(domain):
for name, ttl, rtype, data in self._iterate_records(zone):
record_action = self.denominator.create_record
if name in subdomains and rtype in subdomains[name]:
if name in subzones and rtype in subzones[name]:
# When RR set already exists, replace it with a new one.
rdatatype = dns.rdatatype.from_text(rtype)
record_action = self.denominator.update_record
# So next call will ADD a new record to record set
# instead of replacing the existing one.
subdomains[name].remove(rtype)
subzones[name].remove(rtype)
# NOTE: DynECT does not support deleting of the SOA
# record. Skip updating of the SOA record.
if rdatatype == dns.rdatatype.SOA:
continue
record_action(zone=domain_name,
record_action(zone=zone_name,
name=name,
type=rtype,
ttl=ttl,
data=data)
# Remaining records should be deleted
for name, types in subdomains.items():
for name, types in subzones.items():
for rtype in types:
self.denominator.delete_record(
zone=domain_name, id=name, type=rtype)
zone=zone_name, id=name, type=rtype)
def delete_domain(self, domain_name):
LOG.debug('Delete Domain: %s' % domain_name)
def delete_zone(self, zone_name):
LOG.debug('Delete Zone: %s' % zone_name)
with self._sync_domain(domain_name):
self.denominator.delete_zone(id=domain_name)
with self._sync_zone(zone_name):
self.denominator.delete_zone(id=zone_name)
def _sync_domain(self, domain_name):
LOG.debug('Synchronising domain: %s' % domain_name)
return lockutils.lock('denominator-%s' % domain_name)
def _sync_zone(self, zone_name):
LOG.debug('Synchronising zone: %s' % zone_name)
return lockutils.lock('denominator-%s' % zone_name)
def _iterate_records(self, domain):
for rname, ttl, rdata in domain.iterate_rdatas():
name = rname.derelativize(origin=domain.origin)
def _iterate_records(self, zone):
for rname, ttl, rdata in zone.iterate_rdatas():
name = rname.derelativize(origin=zone.origin)
name = name.to_text(omit_final_dot=True)
data = rdata.to_text(origin=domain.origin, relativize=False)
data = rdata.to_text(origin=zone.origin, relativize=False)
yield name, ttl, dns.rdatatype.to_text(rdata.rdtype), data

View File

@ -30,15 +30,15 @@ class FakeBackend(base.AgentBackend):
def stop(self):
LOG.info(_LI("Stopped fake backend"))
def find_domain_serial(self, domain_name):
LOG.debug("Finding %s" % domain_name)
def find_zone_serial(self, zone_name):
LOG.debug("Finding %s" % zone_name)
return 0
def create_domain(self, domain):
LOG.debug("Creating %s" % domain.origin.to_text())
def create_zone(self, zone):
LOG.debug("Creating %s" % zone.origin.to_text())
def update_domain(self, domain):
LOG.debug("Updating %s" % domain.origin.to_text())
def update_zone(self, zone):
LOG.debug("Updating %s" % zone.origin.to_text())
def delete_domain(self, domain_name):
LOG.debug('Delete Domain: %s' % domain_name)
def delete_zone(self, zone_name):
LOG.debug('Delete Zone: %s' % zone_name)

View File

@ -67,34 +67,34 @@ class Backend(DriverPlugin):
# Core Backend Interface
@abc.abstractmethod
def create_domain(self, context, domain):
def create_zone(self, context, zone):
"""
Create a DNS domain.
Create a DNS zone.
:param context: Security context information.
:param domain: the DNS domain.
:param zone: the DNS zone.
"""
def update_domain(self, context, domain):
def update_zone(self, context, zone):
"""
Update a DNS domain.
Update a DNS zone.
:param context: Security context information.
:param domain: the DNS domain.
:param zone: the DNS zone.
"""
LOG.debug('Update Domain')
LOG.debug('Update Zone')
self.mdns_api.notify_zone_changed(
context, domain, self.host, self.port, self.timeout,
context, zone, self.host, self.port, self.timeout,
self.retry_interval, self.max_retries, self.delay)
@abc.abstractmethod
def delete_domain(self, context, domain):
def delete_zone(self, context, zone):
"""
Delete a DNS domain.
Delete a DNS zone.
:param context: Security context information.
:param domain: the DNS domain.
:param zone: the DNS zone.
"""
def ping(self, context):

View File

@ -47,7 +47,7 @@ class DelegationExists(exceptions.BadRequest, EnhancedDNSException):
error_type = 'delegation_exists'
class DuplicateDomain(exceptions.DuplicateDomain, EnhancedDNSException):
class DuplicateZone(exceptions.DuplicateZone, EnhancedDNSException):
"""
Raised when an attempt to create a zone which is registered to another
Akamai account is made
@ -60,7 +60,7 @@ class Forbidden(exceptions.Forbidden, EnhancedDNSException):
Raised when an attempt to modify a zone which is registered to another
Akamai account is made.
This appears to be returned when creating a new subdomain of domain which
This appears to be returned when creating a new subzone of zone which
already exists in another Akamai account.
"""
pass
@ -142,7 +142,7 @@ class EnhancedDNSClient(object):
return self.client.service.setZones(zones=zones)
except Exception as e:
if 'You do not have permission to view this zone' in str(e):
raise DuplicateDomain()
raise DuplicateZone()
elif 'You do not have access to edit this zone' in str(e):
raise Forbidden()
elif 'basic auth failed' in str(e):
@ -158,7 +158,7 @@ class EnhancedDNSClient(object):
self.client.service.setZone(zone=zone)
except Exception as e:
if 'You do not have permission to view this zone' in str(e):
raise DuplicateDomain()
raise DuplicateZone()
elif 'You do not have access to edit this zone' in str(e):
raise Forbidden()
elif 'basic auth failed' in str(e):
@ -197,22 +197,22 @@ class EnhancedDNSClient(object):
return zoneName.rstrip('.').lower()
def build_zone(client, target, domain):
def build_zone(client, target, zone):
masters = [m.host for m in target.masters]
if target.options.get("tsig_key_name", None):
return client.buildZone(
domain.name,
zone.name,
masters,
domain.id,
zone.id,
target.options["tsig_key_name"],
target.options.get("tsig_key_secret", None),
target.options.get("tsig_key_algorithm", None))
else:
return client.buildZone(
domain.name,
zone.name,
masters,
domain.id)
zone.id)
class AkamaiBackend(base.Backend):
@ -247,12 +247,12 @@ class AkamaiBackend(base.Backend):
raise exceptions.ConfigurationError(
"Akamai only supports mDNS instances on port 53")
def create_domain(self, context, domain):
"""Create a DNS domain"""
zone = build_zone(self.client, self.target, domain)
def create_zone(self, context, zone):
"""Create a DNS zone"""
zone = build_zone(self.client, self.target, zone)
self.client.setZone(zone=zone)
def delete_domain(self, context, domain):
"""Delete a DNS domain"""
self.client.deleteZone(zoneName=domain['name'])
def delete_zone(self, context, zone):
"""Delete a DNS zone"""
self.client.deleteZone(zoneName=zone['name'])

View File

@ -48,8 +48,8 @@ class Bind9Backend(base.Backend):
self.clean_zonefile = strutils.bool_from_string(
self.options.get('clean_zonefile', 'false'))
def create_domain(self, context, domain):
LOG.debug('Create Domain')
def create_zone(self, context, zone):
LOG.debug('Create Zone')
masters = []
for master in self.masters:
host = master['host']
@ -62,26 +62,26 @@ class Bind9Backend(base.Backend):
rndc_op = [
'addzone',
'%s { type slave; masters { %s;}; file "slave.%s%s"; };' %
(domain['name'].rstrip('.'), '; '.join(masters), domain['name'],
domain['id']),
(zone['name'].rstrip('.'), '; '.join(masters), zone['name'],
zone['id']),
]
try:
self._execute_rndc(rndc_op)
except exceptions.Backend as e:
# If create fails because the domain exists, don't reraise
# If create fails because the zone exists, don't reraise
if "already exists" not in six.text_type(e):
raise
self.mdns_api.notify_zone_changed(
context, domain, self.host, self.port, self.timeout,
context, zone, self.host, self.port, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain')
def delete_zone(self, context, zone):
LOG.debug('Delete Zone')
rndc_op = [
'delzone',
'%s' % domain['name'].rstrip('.'),
'%s' % zone['name'].rstrip('.'),
]
if self.clean_zonefile:
rndc_op.insert(1, '-clean')
@ -89,7 +89,7 @@ class Bind9Backend(base.Backend):
try:
self._execute_rndc(rndc_op)
except exceptions.Backend as e:
# If domain is already deleted, don't reraise
# If zone is already deleted, don't reraise
if "not found" not in six.text_type(e):
raise

View File

@ -87,20 +87,20 @@ class DesignateBackend(base.Backend):
session=session, service_type=self.service_type)
return self._client
def create_domain(self, context, domain):
msg = _LI('Creating domain %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': domain['id'], 'd_name': domain['name']})
def create_zone(self, context, zone):
msg = _LI('Creating zone %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': zone['id'], 'd_name': zone['name']})
masters = ["%s:%s" % (i.host, i.port) for i in self.masters]
self.client.zones.create(
domain.name, 'SECONDARY', masters=masters)
zone.name, 'SECONDARY', masters=masters)
def delete_domain(self, context, domain):
msg = _LI('Deleting domain %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': domain['id'], 'd_name': domain['name']})
def delete_zone(self, context, zone):
msg = _LI('Deleting zone %(d_id)s / %(d_name)s')
LOG.info(msg, {'d_id': zone['id'], 'd_name': zone['name']})
try:
self.client.zones.delete(domain.name)
self.client.zones.delete(zone.name)
except exceptions.NotFound:
msg = _LW("Zone %s not found on remote Designate, Ignoring")
LOG.warn(msg, domain.id)
LOG.warn(msg, zone.id)

View File

@ -344,11 +344,11 @@ class DynECTBackend(base.Backend):
timeout=CONF[CFG_GROUP].timeout,
timings=CONF[CFG_GROUP].timings)
def create_domain(self, context, domain):
LOG.info(_LI('Creating domain %(d_id)s / %(d_name)s') %
{'d_id': domain['id'], 'd_name': domain['name']})
def create_zone(self, context, zone):
LOG.info(_LI('Creating zone %(d_id)s / %(d_name)s') %
{'d_id': zone['id'], 'd_name': zone['name']})
url = '/Secondary/%s' % domain['name'].rstrip('.')
url = '/Secondary/%s' % zone['name'].rstrip('.')
data = {
'masters': [m.host for m in self.masters]
}
@ -366,9 +366,9 @@ class DynECTBackend(base.Backend):
except DynClientError as e:
for emsg in e.msgs:
if emsg['ERR_CD'] == 'TARGET_EXISTS':
msg = _LI("Domain already exists, updating existing "
"domain instead %s")
LOG.info(msg % domain['name'])
msg = _LI("Zone already exists, updating existing "
"zone instead %s")
LOG.info(msg % zone['name'])
client.put(url, data=data)
break
else:
@ -377,10 +377,10 @@ class DynECTBackend(base.Backend):
client.put(url, data={'activate': True})
client.logout()
def delete_domain(self, context, domain):
LOG.info(_LI('Deleting domain %(d_id)s / %(d_name)s') %
{'d_id': domain['id'], 'd_name': domain['name']})
url = '/Zone/%s' % domain['name'].rstrip('.')
def delete_zone(self, context, zone):
LOG.info(_LI('Deleting zone %(d_id)s / %(d_name)s') %
{'d_id': zone['id'], 'd_name': zone['name']})
url = '/Zone/%s' % zone['name'].rstrip('.')
client = self.get_client()
try:
client.delete(url)
@ -388,26 +388,8 @@ class DynECTBackend(base.Backend):
if e.http_status == 404:
LOG.warn(_LW("Attempt to delete %(d_id)s / %(d_name)s "
"caused 404, ignoring.") %
{'d_id': domain['id'], 'd_name': domain['name']})
{'d_id': zone['id'], 'd_name': zone['name']})
pass
else:
raise
client.logout()
def create_recordset(self, context, domain, recordset):
LOG.debug('Discarding create_recordset call, not-applicable')
def update_recordset(self, context, domain, recordset):
LOG.debug('Discarding update_recordset call, not-applicable')
def delete_recordset(self, context, domain, recordset):
LOG.debug('Discarding delete_recordset call, not-applicable')
def create_record(self, context, domain, recordset, record):
LOG.debug('Discarding create_record call, not-applicable')
def update_record(self, context, domain, recordset, record):
LOG.debug('Discarding update_record call, not-applicable')
def delete_record(self, context, domain, recordset, record):
LOG.debug('Discarding delete_record call, not-applicable')

View File

@ -25,8 +25,8 @@ LOG = logging.getLogger(__name__)
class FakeBackend(base.Backend):
__plugin_name__ = 'fake'
def create_domain(self, context, domain):
LOG.info(_LI('Create Domain %r') % domain)
def create_zone(self, context, zone):
LOG.info(_LI('Create Zone %r') % zone)
def delete_domain(self, context, domain):
LOG.info(_LI('Delete Domain %r') % domain)
def delete_zone(self, context, zone):
LOG.info(_LI('Delete Zone %r') % zone)

View File

@ -42,18 +42,18 @@ class InfobloxBackend(base.Backend):
raise exceptions.ConfigurationError(
"Infoblox only supports mDNS instances on port 53")
def create_domain(self, context, domain):
LOG.info(_LI('Create Domain %r') % domain)
def create_zone(self, context, zone):
LOG.info(_LI('Create Zone %r') % zone)
dns_net_view = self.infoblox.get_dns_view(context.tenant)
self.infoblox.create_zone_auth(
fqdn=domain['name'][0:-1],
fqdn=zone['name'][0:-1],
dns_view=dns_net_view
)
def delete_domain(self, context, domain):
LOG.info(_LI('Delete Domain %r') % domain)
self.infoblox.delete_zone_auth(domain['name'][0:-1])
def delete_zone(self, context, zone):
LOG.info(_LI('Delete Zone %r') % zone)
self.infoblox.delete_zone_auth(zone['name'][0:-1])
def ping(self, context):
LOG.info(_LI('Ping'))

View File

@ -75,8 +75,8 @@ class NSD4Backend(base.Backend):
if result != 'ok':
raise exceptions.Backend(result)
def create_domain(self, context, domain):
LOG.debug('Create Domain')
def create_zone(self, context, zone):
LOG.debug('Create Zone')
masters = []
for master in self.masters:
host = master['host']
@ -86,22 +86,22 @@ class NSD4Backend(base.Backend):
# Ensure different MiniDNS instances are targeted for AXFRs
random.shuffle(masters)
command = 'addzone %s %s' % (domain['name'], self.pattern)
command = 'addzone %s %s' % (zone['name'], self.pattern)
try:
self._execute_nsd4(command)
except exceptions.Backend as e:
# If create fails because the domain exists, don't reraise
# If create fails because the zone exists, don't reraise
if "already exists" not in six.text_type(e):
raise
def delete_domain(self, context, domain):
LOG.debug('Delete Domain')
command = 'delzone %s' % domain['name']
def delete_zone(self, context, zone):
LOG.debug('Delete Zone')
command = 'delzone %s' % zone['name']
try:
self._execute_nsd4(command)
except exceptions.Backend as e:
# If domain is already deleted, don't reraise
# If zone is already deleted, don't reraise
if "not found" not in six.text_type(e):
raise

View File

@ -118,8 +118,8 @@ class PowerDNSBackend(base.Backend):
if resultproxy.rowcount != 1:
raise exc_notfound()
# Domain Methods
def create_domain(self, context, domain):
# Zone Methods
def create_zone(self, context, zone):
try:
self.session.begin()
@ -128,8 +128,8 @@ class PowerDNSBackend(base.Backend):
masters = six.moves.map(_parse_master, self.masters)
domain_values = {
'designate_id': domain['id'],
'name': domain['name'].rstrip('.'),
'designate_id': zone['id'],
'name': zone['name'].rstrip('.'),
'master': ','.join(masters),
'type': 'SLAVE',
'account': context.tenant
@ -143,23 +143,23 @@ class PowerDNSBackend(base.Backend):
self.session.commit()
self.mdns_api.notify_zone_changed(
context, domain, self.host, self.port, self.timeout,
context, zone, self.host, self.port, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def delete_domain(self, context, domain):
# TODO(kiall): We should make this match create_domain with regard to
def delete_zone(self, context, zone):
# TODO(kiall): We should make this match create_zone with regard to
# transactions.
try:
self._get(tables.domains, domain['id'], exceptions.DomainNotFound,
self._get(tables.domains, zone['id'], exceptions.ZoneNotFound,
id_col=tables.domains.c.designate_id)
except exceptions.DomainNotFound:
# If the Domain is already gone, that's ok. We're deleting it
except exceptions.ZoneNotFound:
# If the Zone is already gone, that's ok. We're deleting it
# anyway, so just log and continue.
LOG.critical(_LC('Attempted to delete a domain which is '
LOG.critical(_LC('Attempted to delete a zone which is '
'not present in the backend. ID: %s') %
domain['id'])
zone['id'])
return
self._delete(tables.domains, domain['id'],
exceptions.DomainNotFound,
self._delete(tables.domains, zone['id'],
exceptions.ZoneNotFound,
id_col=tables.domains.c.designate_id)

View File

@ -28,8 +28,8 @@ cfg.CONF.register_opts([
help='The storage driver to use'),
cfg.ListOpt('enabled-notification-handlers', default=[],
help='Enabled Notification Handlers'),
cfg.IntOpt('max_domain_name_len', default=255,
help="Maximum domain name length"),
cfg.IntOpt('max_zone_name_len', default=255,
help="Maximum zone name length"),
cfg.IntOpt('max_recordset_name_len', default=255,
help="Maximum recordset name length",
deprecated_name='max_record_name_len'),
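The renamed limit is read back through oslo.config like any other option. A hedged sketch of a validator consuming it; the 'service:central' group name is an assumption about where this file registers its options:

    from oslo_config import cfg

    CONF = cfg.CONF

    def check_zone_name_length(zone_name, group='service:central'):
        # group is assumed; use whatever OptGroup the register_opts() call
        # above actually targets.
        max_len = CONF[group].max_zone_name_len
        if len(zone_name) > max_len:
            raise ValueError('zone name exceeds max_zone_name_len (%d)' % max_len)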

View File

@ -47,20 +47,21 @@ class CentralAPI(object):
4.2 - Add methods for pool manager integration
4.3 - Added Zone Transfer Methods
5.0 - Remove dead server code
5.1 - Add xfr_domain
5.1 - Add xfr_zone
5.2 - Add Zone Import methods
5.3 - Add Zone Export method
5.4 - Add asynchronous Zone Export methods
5.5 - Add deleted zone purging task
5.6 - Changed 'purge_domains' function args
5.6 - Changed 'purge_zones' function args
6.0 - Renamed domains to zones
"""
RPC_API_VERSION = '5.6'
RPC_API_VERSION = '6.0'
def __init__(self, topic=None):
topic = topic if topic else cfg.CONF.central_topic
target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='5.6')
self.client = rpc.get_client(target, version_cap='6.0')
@classmethod
def get_instance(cls):
@ -144,56 +145,55 @@ class CentralAPI(object):
LOG.info(_LI("count_tenants: Calling central's count_tenants."))
return self.client.call(context, 'count_tenants')
# Domain Methods
def create_domain(self, context, domain):
LOG.info(_LI("create_domain: Calling central's create_domain."))
return self.client.call(context, 'create_domain', domain=domain)
# Zone Methods
def create_zone(self, context, zone):
LOG.info(_LI("create_zone: Calling central's create_zone."))
return self.client.call(context, 'create_zone', zone=zone)
def get_domain(self, context, domain_id):
LOG.info(_LI("get_domain: Calling central's get_domain."))
return self.client.call(context, 'get_domain', domain_id=domain_id)
def get_zone(self, context, zone_id):
LOG.info(_LI("get_zone: Calling central's get_zone."))
return self.client.call(context, 'get_zone', zone_id=zone_id)
def get_domain_servers(self, context, domain_id):
LOG.info(_LI("get_domain_servers: "
"Calling central's get_domain_servers."))
return self.client.call(context, 'get_domain_servers',
domain_id=domain_id)
def get_zone_ns_records(self, context, zone_id):
LOG.info(_LI("get_zone_ns_records: "
"Calling central's get_zone_ns_records."))
return self.client.call(context, 'get_zone_ns_records',
zone_id=zone_id)
def find_domains(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_domains: Calling central's find_domains."))
return self.client.call(context, 'find_domains', criterion=criterion,
def find_zones(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
LOG.info(_LI("find_zones: Calling central's find_zones."))
return self.client.call(context, 'find_zones', criterion=criterion,
marker=marker, limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain(self, context, criterion=None):
LOG.info(_LI("find_domain: Calling central's find_domain."))
return self.client.call(context, 'find_domain', criterion=criterion)
def find_zone(self, context, criterion=None):
LOG.info(_LI("find_zone: Calling central's find_zone."))
return self.client.call(context, 'find_zone', criterion=criterion)
def update_domain(self, context, domain, increment_serial=True):
LOG.info(_LI("update_domain: Calling central's update_domain."))
return self.client.call(context, 'update_domain', domain=domain,
def update_zone(self, context, zone, increment_serial=True):
LOG.info(_LI("update_zone: Calling central's update_zone."))
return self.client.call(context, 'update_zone', zone=zone,
increment_serial=increment_serial)
def delete_domain(self, context, domain_id):
LOG.info(_LI("delete_domain: Calling central's delete_domain."))
return self.client.call(context, 'delete_domain', domain_id=domain_id)
def delete_zone(self, context, zone_id):
LOG.info(_LI("delete_zone: Calling central's delete_zone."))
return self.client.call(context, 'delete_zone', zone_id=zone_id)
def purge_domains(self, context, criterion, limit=None):
def purge_zones(self, context, criterion, limit=None):
LOG.info(_LI(
"purge_domains: Calling central's purge_domains."
"purge_zones: Calling central's purge_zones."
))
cctxt = self.client.prepare(version='5.6')
return cctxt.call(context, 'purge_domains',
return self.client.call(context, 'purge_zones',
criterion=criterion, limit=limit)
def count_domains(self, context, criterion=None):
LOG.info(_LI("count_domains: Calling central's count_domains."))
return self.client.call(context, 'count_domains', criterion=criterion)
def count_zones(self, context, criterion=None):
LOG.info(_LI("count_zones: Calling central's count_zones."))
return self.client.call(context, 'count_zones', criterion=criterion)
def touch_domain(self, context, domain_id):
LOG.info(_LI("touch_domain: Calling central's touch_domain."))
return self.client.call(context, 'touch_domain', domain_id=domain_id)
def touch_zone(self, context, zone_id):
LOG.info(_LI("touch_zone: Calling central's touch_zone."))
return self.client.call(context, 'touch_zone', zone_id=zone_id)
# TLD Methods
def create_tld(self, context, tld):
@ -220,14 +220,14 @@ class CentralAPI(object):
return self.client.call(context, 'delete_tld', tld_id=tld_id)
# RecordSet Methods
def create_recordset(self, context, domain_id, recordset):
def create_recordset(self, context, zone_id, recordset):
LOG.info(_LI("create_recordset: Calling central's create_recordset."))
return self.client.call(context, 'create_recordset',
domain_id=domain_id, recordset=recordset)
zone_id=zone_id, recordset=recordset)
def get_recordset(self, context, domain_id, recordset_id):
def get_recordset(self, context, zone_id, recordset_id):
LOG.info(_LI("get_recordset: Calling central's get_recordset."))
return self.client.call(context, 'get_recordset', domain_id=domain_id,
return self.client.call(context, 'get_recordset', zone_id=zone_id,
recordset_id=recordset_id)
def find_recordsets(self, context, criterion=None, marker=None, limit=None,
@ -252,11 +252,11 @@ class CentralAPI(object):
recordset=recordset,
increment_serial=increment_serial)
def delete_recordset(self, context, domain_id, recordset_id,
def delete_recordset(self, context, zone_id, recordset_id,
increment_serial=True):
LOG.info(_LI("delete_recordset: Calling central's delete_recordset."))
return self.client.call(context, 'delete_recordset',
domain_id=domain_id,
zone_id=zone_id,
recordset_id=recordset_id,
increment_serial=increment_serial)
@ -266,19 +266,19 @@ class CentralAPI(object):
criterion=criterion)
# Record Methods
def create_record(self, context, domain_id, recordset_id, record,
def create_record(self, context, zone_id, recordset_id, record,
increment_serial=True):
LOG.info(_LI("create_record: Calling central's create_record."))
return self.client.call(context, 'create_record',
domain_id=domain_id,
zone_id=zone_id,
recordset_id=recordset_id,
record=record,
increment_serial=increment_serial)
def get_record(self, context, domain_id, recordset_id, record_id):
def get_record(self, context, zone_id, recordset_id, record_id):
LOG.info(_LI("get_record: Calling central's get_record."))
return self.client.call(context, 'get_record',
domain_id=domain_id,
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id)
@ -299,11 +299,11 @@ class CentralAPI(object):
record=record,
increment_serial=increment_serial)
def delete_record(self, context, domain_id, recordset_id, record_id,
def delete_record(self, context, zone_id, recordset_id, record_id,
increment_serial=True):
LOG.info(_LI("delete_record: Calling central's delete_record."))
return self.client.call(context, 'delete_record',
domain_id=domain_id,
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id,
increment_serial=increment_serial)
@ -312,24 +312,24 @@ class CentralAPI(object):
LOG.info(_LI("count_records: Calling central's count_records."))
return self.client.call(context, 'count_records', criterion=criterion)
# Misc. Report combining counts for tenants, domains and records
# Misc. Report combining counts for tenants, zones and records
def count_report(self, context, criterion=None):
LOG.info(_LI("count_report: Calling central's count_report."))
return self.client.call(context, 'count_report', criterion=criterion)
# Sync Methods
def sync_domains(self, context):
LOG.info(_LI("sync_domains: Calling central's sync_domains."))
return self.client.call(context, 'sync_domains')
def sync_zones(self, context):
LOG.info(_LI("sync_zones: Calling central's sync_zones."))
return self.client.call(context, 'sync_zones')
def sync_domain(self, context, domain_id):
LOG.info(_LI("sync_domain: Calling central's sync_domains."))
return self.client.call(context, 'sync_domain', domain_id=domain_id)
def sync_zone(self, context, zone_id):
LOG.info(_LI("sync_zone: Calling central's sync_zones."))
return self.client.call(context, 'sync_zone', zone_id=zone_id)
def sync_record(self, context, domain_id, recordset_id, record_id):
def sync_record(self, context, zone_id, recordset_id, record_id):
LOG.info(_LI("sync_record: Calling central's sync_record."))
return self.client.call(context, 'sync_record',
domain_id=domain_id,
zone_id=zone_id,
recordset_id=recordset_id,
record_id=record_id)
@ -348,7 +348,7 @@ class CentralAPI(object):
return self.client.call(context, 'update_floatingip', region=region,
floatingip_id=floatingip_id, values=values)
# Blacklisted Domain Methods
# Blacklisted Zone Methods
def create_blacklist(self, context, blacklist):
LOG.info(_LI("create_blacklist: Calling central's create_blacklist"))
return self.client.call(context, 'create_blacklist',
@ -409,11 +409,11 @@ class CentralAPI(object):
return self.client.call(context, 'delete_pool', pool_id=pool_id)
# Pool Manager Integration Methods
def update_status(self, context, domain_id, status, serial):
def update_status(self, context, zone_id, status, serial):
LOG.info(_LI("update_status: Calling central's update_status "
"for %(domain_id)s : %(status)s : %(serial)s") %
{'domain_id': domain_id, 'status': status, 'serial': serial})
self.client.cast(context, 'update_status', domain_id=domain_id,
"for %(zone_id)s : %(status)s : %(serial)s") %
{'zone_id': zone_id, 'status': status, 'serial': serial})
self.client.cast(context, 'update_status', zone_id=zone_id,
status=status, serial=serial)
# Zone Ownership Transfers
@ -509,10 +509,9 @@ class CentralAPI(object):
'delete_zone_transfer_accept',
zone_transfer_accept_id=zone_transfer_accept_id)
def xfr_domain(self, context, domain_id):
LOG.info(_LI("xfr_domain: Calling central's xfr_domain"))
cctxt = self.client.prepare(version='5.3')
return cctxt.call(context, 'xfr_domain', domain_id=domain_id)
def xfr_zone(self, context, zone_id):
LOG.info(_LI("xfr_zone: Calling central's xfr_zone"))
return self.client.call(context, 'xfr_zone', zone_id=zone_id)
# Zone Import Methods
def create_zone_import(self, context, request_body):
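The client above is built with version_cap='6.0', and every method name changes with this bump, so client and central service have to move together; an older central simply has no create_zone or get_zone to call. A minimal caller sketch, assuming a reachable designate-central on the message bus (the zone id is a placeholder):

    from designate.central import rpcapi as central_rpcapi
    from designate.context import DesignateContext

    context = DesignateContext.get_admin_context(all_tenants=True)
    central_api = central_rpcapi.CentralAPI.get_instance()

    # Placeholder id; callers that previously used get_domain()/find_domains()
    # switch to the renamed methods together with this RPC bump.
    zone = central_api.get_zone(context, 'a86dba58-0043-4cc6-a1bb-69d5e86f3ca3')
    zones = central_api.find_zones(context, criterion={'name': zone.name})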

File diff suppressed because it is too large

View File

@ -169,7 +169,7 @@ class DesignateContext(context.RequestContext):
@abandon.setter
def abandon(self, value):
if value:
policy.check('abandon_domain', self)
policy.check('abandon_zone', self)
self._abandon = value
@property

View File

@ -281,7 +281,7 @@ def from_dnspython_zone(dnspython_zone):
'expire': soa[0].expire
}
zone = objects.Domain(**values)
zone = objects.Zone(**values)
rrsets = dnspyrecords_to_recordsetlist(dnspython_zone.nodes)
zone.recordsets = rrsets
@ -348,7 +348,7 @@ def do_axfr(zone_name, servers, timeout=None, source=None):
LOG.error(msg % log_info)
continue
except dns.exception.FormError:
msg = _LE("Domain %(name)s is not present on %(host)s."
msg = _LE("Zone %(name)s is not present on %(host)s."
"Trying next server.")
LOG.error(msg % log_info)
except socket.error:
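from_dnspython_zone (above) turns a dnspython zone into an objects.Zone with its recordsets, and do_axfr is what usually produces that dnspython zone. A hedged sketch of the underlying dnspython calls, without the server iteration and error mapping the real helper adds:

    import dns.query
    import dns.zone

    def axfr_once(zone_name, master_host, timeout=10):
        # Single AXFR from one master; do_axfr additionally shuffles servers,
        # falls through to the next one on failure and raises designate
        # exceptions instead of dnspython ones.
        xfr = dns.query.xfr(master_host, zone_name, timeout=timeout)
        return dns.zone.from_xfr(xfr)

    # dnspython_zone = axfr_once('example.com.', '192.0.2.1')
    # zone = from_dnspython_zone(dnspython_zone)  # -> objects.Zone + recordsets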

View File

@ -179,9 +179,9 @@ class UnsupportedContentType(BadRequest):
error_type = 'unsupported_content_type'
class InvalidDomainName(Base):
class InvalidZoneName(Base):
error_code = 400
error_type = 'invalid_domain_name'
error_type = 'invalid_zone_name'
expected = True
@ -205,9 +205,9 @@ class InvalidTTL(Base):
error_type = 'invalid_ttl'
class DomainHasSubdomain(Base):
class ZoneHasSubZone(Base):
error_code = 400
error_type = 'domain_has_subdomain'
error_type = 'zone_has_sub_zone'
class Forbidden(Base):
@ -216,11 +216,11 @@ class Forbidden(Base):
expected = True
class IllegalChildDomain(Forbidden):
class IllegalChildZone(Forbidden):
error_type = 'illegal_child'
class IllegalParentDomain(Forbidden):
class IllegalParentZone(Forbidden):
error_type = 'illegal_parent'
@ -246,8 +246,8 @@ class DuplicateTsigKey(Duplicate):
error_type = 'duplicate_tsigkey'
class DuplicateDomain(Duplicate):
error_type = 'duplicate_domain'
class DuplicateZone(Duplicate):
error_type = 'duplicate_zone'
class DuplicateTld(Duplicate):
@ -278,8 +278,8 @@ class DuplicatePoolAttribute(Duplicate):
error_type = 'duplicate_pool_attribute'
class DuplicateDomainAttribute(Duplicate):
error_type = 'duplicate_domain_attribute'
class DuplicateZoneAttribute(Duplicate):
error_type = 'duplicate_zone_attribute'
class DuplicatePoolNsRecord(Duplicate):
@ -330,16 +330,16 @@ class BlacklistNotFound(NotFound):
error_type = 'blacklist_not_found'
class DomainNotFound(NotFound):
error_type = 'domain_not_found'
class ZoneNotFound(NotFound):
error_type = 'zone_not_found'
class DomainMasterNotFound(NotFound):
error_type = 'domain_master_not_found'
class ZoneMasterNotFound(NotFound):
error_type = 'zone_master_not_found'
class DomainAttributeNotFound(NotFound):
error_type = 'domain_attribute_not_found'
class ZoneAttributeNotFound(NotFound):
error_type = 'zone_attribute_not_found'
class TldNotFound(NotFound):
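As the hunks above show, the Domain* exception classes are renamed in place, so callers that caught them have to move to the new names. A small illustrative helper (function name hypothetical):

    from designate import exceptions

    def get_zone_or_none(central_api, context, zone_id):
        # exceptions.DomainNotFound is gone after this change; a missing
        # zone now raises ZoneNotFound.
        try:
            return central_api.get_zone(context, zone_id)
        except exceptions.ZoneNotFound:
            return None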

View File

@ -61,7 +61,7 @@ class AkamaiCommands(base.Commands):
client = impl_akamai.EnhancedDNSClient(
target.options.get("username"), target.options.get("password"))
zone = self.central_api.find_domain(self.context, {"name": zone_name})
zone = self.central_api.find_zone(self.context, {"name": zone_name})
akamai_zone = client.getZone(zone_name)
print("Designate zone\n%s" % pformat(zone.to_dict()))
@ -70,7 +70,7 @@ class AkamaiCommands(base.Commands):
@base.args('pool-id', help="Pool to Sync", type=str)
@base.args('pool-target-id', help="Pool Target to Sync", type=str)
@base.args('--batch-size', default=20, type=int)
def sync_domains(self, pool_id, pool_target_id, batch_size):
def sync_zones(self, pool_id, pool_target_id, batch_size):
pool, target = self._get_config(pool_id, pool_target_id)
client = impl_akamai.EnhancedDNSClient(
@ -82,7 +82,7 @@ class AkamaiCommands(base.Commands):
marker = None
while (marker is not False):
zones = self.central_api.find_domains(
zones = self.central_api.find_zones(
self.context, criterion, limit=batch_size, marker=marker)
update = []
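sync_zones above pages through every zone with the marker/limit arguments of find_zones. The marker bookkeeping falls outside the displayed hunk; one common way it is advanced (a sketch, not the verbatim command):

    def iter_zones(central_api, context, criterion, batch_size=20):
        marker = None
        while marker is not False:
            zones = central_api.find_zones(
                context, criterion, limit=batch_size, marker=marker)
            if len(zones) == 0:
                marker = False          # no more pages
            else:
                marker = zones[-1].id   # next page starts after the last zone
                for zone in zones:
                    yield zone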

View File

@ -116,8 +116,8 @@ class RequestHandler(xfr.XFRMixin):
}
try:
domain = self.storage.find_domain(context, criterion)
except exceptions.DomainNotFound:
zone = self.storage.find_zone(context, criterion)
except exceptions.ZoneNotFound:
response.set_rcode(dns.rcode.NOTAUTH)
yield response
raise StopIteration
@ -127,11 +127,11 @@ class RequestHandler(xfr.XFRMixin):
# We check whether the src_master, the assumed master for the zone that
# sent this NOTIFY, is actually one of the zone's configured masters.
# If it's not, we reply but don't act on the NOTIFY.
master_addr = domain.get_master_by_ip(notify_addr)
master_addr = zone.get_master_by_ip(notify_addr)
if not master_addr:
msg = _LW("NOTIFY for %(name)s from non-master server "
"%(addr)s, ignoring.")
LOG.warn(msg % {"name": domain.name, "addr": notify_addr})
LOG.warn(msg % {"name": zone.name, "addr": notify_addr})
response.set_rcode(dns.rcode.REFUSED)
yield response
raise StopIteration
@ -140,17 +140,17 @@ class RequestHandler(xfr.XFRMixin):
# According to RFC we should query the server that sent the NOTIFY
resolver.nameservers = [notify_addr]
soa_answer = resolver.query(domain.name, 'SOA')
soa_answer = resolver.query(zone.name, 'SOA')
soa_serial = soa_answer[0].serial
if soa_serial == domain.serial:
if soa_serial == zone.serial:
msg = _LI("Serial %(serial)s is the same for master and us for "
"%(domain_id)s")
LOG.info(msg % {"serial": soa_serial, "domain_id": domain.id})
"%(zone_id)s")
LOG.info(msg % {"serial": soa_serial, "zone_id": zone.id})
else:
msg = _LI("Scheduling AXFR for %(domain_id)s from %(master_addr)s")
info = {"domain_id": domain.id, "master_addr": master_addr}
msg = _LI("Scheduling AXFR for %(zone_id)s from %(master_addr)s")
info = {"zone_id": zone.id, "master_addr": master_addr}
LOG.info(msg % info)
self.tg.add_thread(self.domain_sync, context, domain,
self.tg.add_thread(self.zone_sync, context, zone,
[master_addr])
response.flags |= dns.flags.AA
@ -170,7 +170,7 @@ class RequestHandler(xfr.XFRMixin):
return response
def _domain_criterion_from_request(self, request, criterion=None):
def _zone_criterion_from_request(self, request, criterion=None):
"""Builds a bare criterion dict based on the request attributes"""
criterion = criterion or {}
@ -197,12 +197,12 @@ class RequestHandler(xfr.XFRMixin):
return criterion
def _convert_to_rrset(self, domain, recordset):
# Fetch the domain or the config ttl if the recordset ttl is null
def _convert_to_rrset(self, zone, recordset):
# Fetch the zone or the config ttl if the recordset ttl is null
if recordset.ttl:
ttl = recordset.ttl
else:
ttl = domain.ttl
ttl = zone.ttl
# construct rdata from all the records
rdata = []
@ -233,12 +233,12 @@ class RequestHandler(xfr.XFRMixin):
# TODO(vinod) once validation is separated from the api,
# validate the parameters
try:
criterion = self._domain_criterion_from_request(
criterion = self._zone_criterion_from_request(
request, {'name': q_rrset.name.to_text()})
domain = self.storage.find_domain(context, criterion)
zone = self.storage.find_zone(context, criterion)
except exceptions.DomainNotFound:
LOG.warning(_LW("DomainNotFound while handling axfr request. "
except exceptions.ZoneNotFound:
LOG.warning(_LW("ZoneNotFound while handling axfr request. "
"Question was %(qr)s") % {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
@ -252,11 +252,11 @@ class RequestHandler(xfr.XFRMixin):
raise StopIteration
# The AXFR response needs to have a SOA at the beginning and end.
criterion = {'domain_id': domain.id, 'type': 'SOA'}
criterion = {'zone_id': zone.id, 'type': 'SOA'}
soa_records = self.storage.find_recordsets_axfr(context, criterion)
# Get all the records other than SOA
criterion = {'domain_id': domain.id, 'type': '!SOA'}
criterion = {'zone_id': zone.id, 'type': '!SOA'}
records = self.storage.find_recordsets_axfr(context, criterion)
# Place the SOA RRSet at the front and end of the RRSet list
@ -296,7 +296,7 @@ class RequestHandler(xfr.XFRMixin):
# Build a DNSPython RRSet from the RR
rrset = dns.rrset.from_text_list(
str(record[3]), # name
int(record[2]) if record[2] is not None else domain.ttl, # ttl
int(record[2]) if record[2] is not None else zone.ttl, # ttl
dns.rdataclass.IN, # class
str(record[1]), # rrtype
[str(record[4])], # rdata
@ -309,10 +309,10 @@ class RequestHandler(xfr.XFRMixin):
if renderer.counts[dns.renderer.ANSWER] == 0:
# We've received a TooBig from the first attempted RRSet in
# this packet. Log a warning and abort the AXFR.
LOG.warning(_LW('Aborted AXFR of %(domain)s, a single RR '
LOG.warning(_LW('Aborted AXFR of %(zone)s, a single RR '
'(%(rrset_type)s %(rrset_name)s) '
'exceeded the max message size.'),
{'domain': domain.name,
{'zone': zone.name,
'rrset_type': record[1],
'rrset_name': record[3]})
@ -356,17 +356,17 @@ class RequestHandler(xfr.XFRMixin):
criterion = {
'name': q_rrset.name.to_text(),
'type': dns.rdatatype.to_text(q_rrset.rdtype),
'domains_deleted': False
'zones_deleted': False
}
recordset = self.storage.find_recordset(context, criterion)
try:
criterion = self._domain_criterion_from_request(
request, {'id': recordset.domain_id})
domain = self.storage.find_domain(context, criterion)
criterion = self._zone_criterion_from_request(
request, {'id': recordset.zone_id})
zone = self.storage.find_zone(context, criterion)
except exceptions.DomainNotFound:
LOG.warning(_LW("DomainNotFound while handling query request"
except exceptions.ZoneNotFound:
LOG.warning(_LW("ZoneNotFound while handling query request"
". Question was %(qr)s") % {'qr': q_rrset})
yield self._handle_query_error(request, dns.rcode.REFUSED)
@ -379,7 +379,7 @@ class RequestHandler(xfr.XFRMixin):
yield self._handle_query_error(request, dns.rcode.REFUSED)
raise StopIteration
r_rrset = self._convert_to_rrset(domain, recordset)
r_rrset = self._convert_to_rrset(zone, recordset)
response.set_rcode(dns.rcode.NOERROR)
response.answer = [r_rrset]
# For all the data stored in designate mdns is Authoritative
@ -396,9 +396,9 @@ class RequestHandler(xfr.XFRMixin):
# However, an authoritative nameserver shouldn't return NXDOMAIN
# for a zone it isn't authoritative for. It would be more
# appropriate for it to return REFUSED. It should still return
# NXDOMAIN if it is authoritative for a domain but the FQDN doesn't
# NXDOMAIN if it is authoritative for a zone but the FQDN doesn't
# exist, like abcdef.rackspace.com. Of course, a wildcard within a
# domain would mean that NXDOMAIN isn't ever returned for a domain.
# zone would mean that NXDOMAIN isn't ever returned for a zone.
#
# To simplify things, this currently returns REFUSED in all cases.
# If zone transfers need different errors, we could revisit this.
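The NOTIFY path above boils down to: confirm the sender is a configured master, ask it for its SOA, and only schedule an AXFR when the serials differ. A compact restatement of that check, where zone is the designate zone object returned by find_zone:

    import dns.resolver

    def serial_changed(zone, notify_addr):
        # Query the notifying master directly (per RFC 1996) and compare
        # its SOA serial with the one we hold.
        resolver = dns.resolver.Resolver()
        resolver.nameservers = [notify_addr]
        soa_answer = resolver.query(zone.name, 'SOA')
        return soa_answer[0].serial != zone.serial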

View File

@ -40,11 +40,11 @@ class NotifyEndpoint(base.BaseEndpoint):
RPC_API_VERSION = '2.0'
RPC_API_NAMESPACE = 'notify'
def notify_zone_changed(self, context, domain, host, port, timeout,
def notify_zone_changed(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
"""
:param context: The user context.
:param domain: The designate domain object. This contains the domain
:param zone: The designate zone object. This contains the zone
name.
:param host: A notify is sent to this host.
:param port: A notify is sent to this port.
@ -61,15 +61,15 @@ class NotifyEndpoint(base.BaseEndpoint):
"""
time.sleep(delay)
return self._make_and_send_dns_message(
domain, host, port, timeout, retry_interval, max_retries,
zone, host, port, timeout, retry_interval, max_retries,
notify=True)
def poll_for_serial_number(self, context, domain, nameserver, timeout,
def poll_for_serial_number(self, context, zone, nameserver, timeout,
retry_interval, max_retries, delay):
"""
:param context: The user context.
:param domain: The designate domain object. This contains the domain
name. domain.serial = expected_serial
:param zone: The designate zone object. This contains the zone
name. zone.serial = expected_serial
:param nameserver: Destination for the poll
:param timeout: The time (in seconds) to wait for a SOA response from
nameserver.
@ -81,17 +81,17 @@ class NotifyEndpoint(base.BaseEndpoint):
:return: The pool manager is informed of the status with update_status.
"""
(status, actual_serial, retries) = self.get_serial_number(
context, domain, nameserver.host, nameserver.port, timeout,
context, zone, nameserver.host, nameserver.port, timeout,
retry_interval, max_retries, delay)
self.pool_manager_api.update_status(
context, domain, nameserver, status, actual_serial)
context, zone, nameserver, status, actual_serial)
def get_serial_number(self, context, domain, host, port, timeout,
def get_serial_number(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
"""
:param context: The user context.
:param domain: The designate domain object. This contains the domain
name. domain.serial = expected_serial
:param zone: The designate zone object. This contains the zone
name. zone.serial = expected_serial
:param host: A notify is sent to this host.
:param port: A notify is sent to this port.
:param timeout: The time (in seconds) to wait for a SOA response from
@ -115,26 +115,26 @@ class NotifyEndpoint(base.BaseEndpoint):
time.sleep(delay)
while (True):
(response, retry) = self._make_and_send_dns_message(
domain, host, port, timeout, retry_interval, retries)
zone, host, port, timeout, retry_interval, retries)
if response and response.rcode() in (
dns.rcode.NXDOMAIN, dns.rcode.REFUSED, dns.rcode.SERVFAIL):
status = 'NO_DOMAIN'
elif response and len(response.answer) == 1 \
and str(response.answer[0].name) == str(domain.name) \
and str(response.answer[0].name) == str(zone.name) \
and response.answer[0].rdclass == dns.rdataclass.IN \
and response.answer[0].rdtype == dns.rdatatype.SOA:
# parse the SOA response and get the serial number
rrset = response.answer[0]
actual_serial = rrset.to_rdataset().items[0].serial
if actual_serial is None or actual_serial < domain.serial:
if actual_serial is None or actual_serial < zone.serial:
# TODO(vinod): Account for serial number wrap around.
retries = retries - retry
LOG.warn(_LW("Got lower serial for '%(zone)s' to '%(host)s:"
"%(port)s'. Expected:'%(es)d'. Got:'%(as)s'."
"Retries left='%(retries)d'") %
{'zone': domain.name, 'host': host,
'port': port, 'es': domain.serial,
{'zone': zone.name, 'host': host,
'port': port, 'es': zone.serial,
'as': actual_serial, 'retries': retries})
if retries > 0:
# retry again
@ -150,10 +150,10 @@ class NotifyEndpoint(base.BaseEndpoint):
# Return retries for testing purposes.
return (status, actual_serial, retries)
def _make_and_send_dns_message(self, domain, host, port, timeout,
def _make_and_send_dns_message(self, zone, host, port, timeout,
retry_interval, max_retries, notify=False):
"""
:param domain: The designate domain object. This contains the domain
:param zone: The designate zone object. This contains the zone
name.
:param host: The destination host for the dns message.
:param port: The destination port for the dns message.
@ -168,7 +168,7 @@ class NotifyEndpoint(base.BaseEndpoint):
response is the response on success or None on failure.
current_retry is the current retry number
"""
dns_message = self._make_dns_message(domain.name, notify=notify)
dns_message = self._make_dns_message(zone.name, notify=notify)
retry = 0
response = None
@ -178,7 +178,7 @@ class NotifyEndpoint(base.BaseEndpoint):
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': domain.name, 'server': host,
'zone': zone.name, 'server': host,
'port': port})
response = self._send_dns_message(
dns_message, host, port, timeout)
@ -188,7 +188,7 @@ class NotifyEndpoint(base.BaseEndpoint):
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': domain.name, 'server': host,
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
response = None
@ -200,7 +200,7 @@ class NotifyEndpoint(base.BaseEndpoint):
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': domain.name, 'server': host,
'zone': zone.name, 'server': host,
'port': port, 'timeout': timeout,
'retry': retry})
response = None
@ -210,7 +210,7 @@ class NotifyEndpoint(base.BaseEndpoint):
elif response.rcode() in (dns.rcode.NXDOMAIN, dns.rcode.REFUSED,
dns.rcode.SERVFAIL):
LOG.info(_LI("%(zone)s not found on %(server)s:%(port)d") %
{'zone': domain.name, 'server': host,
{'zone': zone.name, 'server': host,
'port': port})
break
elif not (response.flags & dns.flags.AA) or dns.rcode.from_flags(
@ -219,7 +219,7 @@ class NotifyEndpoint(base.BaseEndpoint):
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.\nResponse message:\n%(resp)s\n") %
{'msg': 'NOTIFY' if notify else 'SOA',
'zone': domain.name, 'server': host,
'zone': zone.name, 'server': host,
'port': port, 'resp': str(response)})
response = None
break
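_make_dns_message is not part of this hunk; a hedged sketch of what such a builder typically looks like with dnspython, a SOA query flipped to the NOTIFY opcode when poking nameservers about a change (the details are assumptions, not lifted from the Designate source):

    import dns.message
    import dns.opcode
    import dns.query
    import dns.rdatatype

    def make_soa_or_notify(zone_name, notify=False):
        msg = dns.message.make_query(zone_name, dns.rdatatype.SOA)
        if notify:
            # NOTIFY shares the wire format of a query, with a different opcode.
            msg.set_opcode(dns.opcode.NOTIFY)
        return msg

    # response = dns.query.udp(
    #     make_soa_or_notify('example.com.', notify=True), '192.0.2.1',
    #     timeout=10, port=53)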

View File

@ -69,51 +69,51 @@ class MdnsAPI(object):
MDNS_API = cls()
return MDNS_API
def notify_zone_changed(self, context, domain, host, port, timeout,
def notify_zone_changed(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
LOG.info(_LI("notify_zone_changed: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' to nameserver '%(host)s:%(port)s'") %
{'zone': domain.name, 'serial': domain.serial,
{'zone': zone.name, 'serial': zone.serial,
'host': host, 'port': port})
# The notify_zone_changed method is a cast rather than a call since the
# caller need not wait for the notify to complete.
return self.notify_client.cast(
context, 'notify_zone_changed', domain=domain,
context, 'notify_zone_changed', zone=zone,
host=host, port=port, timeout=timeout,
retry_interval=retry_interval, max_retries=max_retries,
delay=delay)
def poll_for_serial_number(self, context, domain, nameserver, timeout,
def poll_for_serial_number(self, context, zone, nameserver, timeout,
retry_interval, max_retries, delay):
LOG.info(
_LI("poll_for_serial_number: Calling mdns for zone '%(zone)s', "
"serial '%(serial)s' on nameserver '%(host)s:%(port)s'") %
{'zone': domain.name, 'serial': domain.serial,
{'zone': zone.name, 'serial': zone.serial,
'host': nameserver.host, 'port': nameserver.port})
# The poll_for_serial_number method is a cast rather than a call since
# the caller need not wait for the poll to complete. Mdns informs pool
# manager of the return value using update_status
return self.notify_client.cast(
context, 'poll_for_serial_number', domain=domain,
context, 'poll_for_serial_number', zone=zone,
nameserver=nameserver, timeout=timeout,
retry_interval=retry_interval, max_retries=max_retries,
delay=delay)
def get_serial_number(self, context, domain, host, port, timeout,
def get_serial_number(self, context, zone, host, port, timeout,
retry_interval, max_retries, delay):
LOG.info(
_LI("get_serial_number: Calling mdns for zone '%(zone)s', serial "
"%(serial)s' on nameserver '%(host)s:%(port)s'") %
{'zone': domain.name, 'serial': domain.serial,
{'zone': zone.name, 'serial': zone.serial,
'host': host, 'port': port})
cctxt = self.notify_client.prepare()
return cctxt.call(
context, 'get_serial_number', domain=domain,
context, 'get_serial_number', zone=zone,
host=host, port=port, timeout=timeout,
retry_interval=retry_interval, max_retries=max_retries,
delay=delay)
def perform_zone_xfr(self, context, domain):
def perform_zone_xfr(self, context, zone):
LOG.info(_LI("perform_zone_xfr: Calling mdns for zone %(zone)s") %
{"zone": domain.name})
return self.xfr_client.cast(context, 'perform_zone_xfr', domain=domain)
{"zone": zone.name})
return self.xfr_client.cast(context, 'perform_zone_xfr', zone=zone)

View File

@ -29,29 +29,29 @@ class XFRMixin(object):
"""
Utility mixin that holds common methods for XFR functionality.
"""
def domain_sync(self, context, domain, servers=None):
servers = servers or domain.masters
def zone_sync(self, context, zone, servers=None):
servers = servers or zone.masters
servers = servers.to_list()
timeout = cfg.CONF["service:mdns"].xfr_timeout
try:
dnspython_zone = dnsutils.do_axfr(domain.name, servers,
dnspython_zone = dnsutils.do_axfr(zone.name, servers,
timeout=timeout)
except exceptions.XFRFailure as e:
LOG.warning(e.message)
return
zone = dnsutils.from_dnspython_zone(dnspython_zone)
domain.update(zone)
zone.update(zone)
domain.transferred_at = timeutils.utcnow()
zone.transferred_at = timeutils.utcnow()
self.central_api.update_domain(context, domain, increment_serial=False)
self.central_api.update_zone(context, zone, increment_serial=False)
class XfrEndpoint(base.BaseEndpoint, XFRMixin):
RPC_API_VERSION = '1.0'
RPC_API_NAMESPACE = 'xfr'
def perform_zone_xfr(self, context, domain):
self.domain_sync(context, domain)
def perform_zone_xfr(self, context, zone):
self.zone_sync(context, zone)

View File

@ -57,14 +57,14 @@ class NotificationHandler(ExtensionPlugin):
def process_notification(self, context, event_type, payload):
"""Processes a given notification"""
def get_domain(self, domain_id):
def get_zone(self, zone_id):
"""
Return the domain for this context
Return the zone for this context
"""
context = DesignateContext.get_admin_context(all_tenants=True)
return self.central_api.get_domain(context, domain_id)
return self.central_api.get_zone(context, zone_id)
def _find_or_create_recordset(self, context, domain_id, name, type,
def _find_or_create_recordset(self, context, zone_id, name, type,
ttl=None):
name = name.encode('idna').decode('utf-8')
@ -76,12 +76,12 @@ class NotificationHandler(ExtensionPlugin):
'ttl': ttl,
}
recordset = self.central_api.create_recordset(
context, domain_id, RecordSet(**values))
context, zone_id, RecordSet(**values))
except exceptions.DuplicateRecordSet:
# Fetch the existing recordset
recordset = self.central_api.find_recordset(context, {
'domain_id': domain_id,
'zone_id': zone_id,
'name': name,
'type': type,
})
@ -106,7 +106,7 @@ class BaseAddressHandler(NotificationHandler):
data["octet%s" % i] = ip_data[i]
return data
def _create(self, addresses, extra, domain_id, managed=True,
def _create(self, addresses, extra, zone_id, managed=True,
resource_type=None, resource_id=None):
"""
Create a record from addresses
@ -123,13 +123,13 @@ class BaseAddressHandler(NotificationHandler):
'Deprecation notice: Unmanaged designate-sink records are '
'being deprecated please update the call '
'to remove managed=False'))
LOG.debug('Using DomainID: %s' % domain_id)
domain = self.get_domain(domain_id)
LOG.debug('Domain: %r' % domain)
LOG.debug('Using Zone ID: %s' % zone_id)
zone = self.get_zone(zone_id)
LOG.debug('Zone: %r' % zone)
data = extra.copy()
LOG.debug('Event data: %s' % data)
data['domain'] = domain['name']
data['zone'] = zone['name']
context = DesignateContext().elevated()
context.all_tenants = True
@ -141,7 +141,7 @@ class BaseAddressHandler(NotificationHandler):
for fmt in cfg.CONF[self.name].get('format'):
recordset_values = {
'domain_id': domain['id'],
'zone_id': zone['id'],
'name': fmt % event_data,
'type': 'A' if addr['version'] == 4 else 'AAAA'}
@ -160,16 +160,16 @@ class BaseAddressHandler(NotificationHandler):
'managed_resource_id': resource_id})
LOG.debug('Creating record in %s / %s with values %r' %
(domain['id'], recordset['id'], record_values))
(zone['id'], recordset['id'], record_values))
self.central_api.create_record(context,
domain['id'],
zone['id'],
recordset['id'],
Record(**record_values))
def _delete(self, domain_id, managed=True, resource_id=None,
def _delete(self, zone_id, managed=True, resource_id=None,
resource_type='instance', criterion=None):
"""
Handle a generic delete of a fixed ip within a domain
Handle a generic delete of a fixed ip within a zone
:param criterion: Criterion to search and destroy records
"""
@ -184,7 +184,7 @@ class BaseAddressHandler(NotificationHandler):
context.all_tenants = True
context.edit_managed_records = True
criterion.update({'domain_id': domain_id})
criterion.update({'zone_id': zone_id})
if managed:
criterion.update({
@ -201,6 +201,6 @@ class BaseAddressHandler(NotificationHandler):
LOG.debug('Deleting record %s' % record['id'])
self.central_api.delete_record(context,
domain_id,
zone_id,
record['recordset_id'],
record['id'])

View File

@ -29,9 +29,9 @@ cfg.CONF.register_group(cfg.OptGroup(
cfg.CONF.register_opts([
cfg.ListOpt('notification-topics', default=['notifications']),
cfg.StrOpt('control-exchange', default='neutron'),
cfg.StrOpt('domain-id'),
cfg.StrOpt('zone-id'),
cfg.MultiStrOpt('format', default=[
'%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(domain)s'])
'%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(zone)s'])
], group='handler:neutron_floatingip')
@ -56,9 +56,9 @@ class NeutronFloatingHandler(BaseAddressHandler):
LOG.debug('%s received notification - %s' %
(self.get_canonical_name(), event_type))
domain_id = cfg.CONF[self.name].domain_id
zone_id = cfg.CONF[self.name].zone_id
if event_type.startswith('floatingip.delete'):
self._delete(domain_id=domain_id,
self._delete(zone_id=zone_id,
resource_id=payload['floatingip_id'],
resource_type='floatingip')
elif event_type.startswith('floatingip.update'):
@ -69,10 +69,10 @@ class NeutronFloatingHandler(BaseAddressHandler):
}
self._create(addresses=[address],
extra=payload['floatingip'],
domain_id=domain_id,
zone_id=zone_id,
resource_id=payload['floatingip']['id'],
resource_type='floatingip')
elif not payload['floatingip']['fixed_ip_address']:
self._delete(domain_id=domain_id,
self._delete(zone_id=zone_id,
resource_id=payload['floatingip']['id'],
resource_type='floatingip')

View File

@ -29,9 +29,9 @@ cfg.CONF.register_group(cfg.OptGroup(
cfg.CONF.register_opts([
cfg.ListOpt('notification-topics', default=['notifications']),
cfg.StrOpt('control-exchange', default='nova'),
cfg.StrOpt('domain-id'),
cfg.StrOpt('zone-id'),
cfg.MultiStrOpt('format', default=[
'%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(domain)s'])
'%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(zone)s'])
], group='handler:nova_fixed')
@ -60,15 +60,15 @@ class NovaFixedHandler(BaseAddressHandler):
def process_notification(self, context, event_type, payload):
LOG.debug('NovaFixedHandler received notification - %s' % event_type)
domain_id = cfg.CONF[self.name].domain_id
zone_id = cfg.CONF[self.name].zone_id
if event_type == 'compute.instance.create.end':
self._create(addresses=payload['fixed_ips'],
extra=payload,
domain_id=domain_id,
zone_id=zone_id,
resource_id=payload['instance_id'],
resource_type='instance')
elif event_type == 'compute.instance.delete.start':
self._delete(domain_id=domain_id,
self._delete(zone_id=zone_id,
resource_id=payload['instance_id'],
resource_type='instance')
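Both handlers build record names from the configured format strings, whose %(domain)s placeholder becomes %(zone)s with this change. A small worked example of the interpolation the base handler performs for an IPv4 address (helper name illustrative):

    def format_record_name(fmt, address, zone_name):
        data = {'zone': zone_name}
        for i, octet in enumerate(address.split('.')):
            data['octet%s' % i] = octet
        return fmt % data

    name = format_record_name(
        '%(octet0)s-%(octet1)s-%(octet2)s-%(octet3)s.%(zone)s',
        '10.0.0.5', 'example.com.')
    assert name == '10-0-0-5.example.com.'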

View File

@ -12,16 +12,14 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects.base import DesignateObject # noqa
from designate.objects.base import DictObjectMixin # noqa
from designate.objects.base import ListObjectMixin # noqa
from designate.objects.base import PagedListObjectMixin # noqa
from designate.objects.blacklist import Blacklist, BlacklistList # noqa
from designate.objects.domain import Domain, DomainList # noqa
from designate.objects.domain_attribute import DomainAttribute, DomainAttributeList # noqa
from designate.objects.domain_master import DomainMaster, DomainMasterList # noqa
from designate.objects.zone import Zone, ZoneList # noqa
from designate.objects.zone_attribute import ZoneAttribute, ZoneAttributeList # noqa
from designate.objects.zone_master import ZoneMaster, ZoneMasterList # noqa
from designate.objects.floating_ip import FloatingIP, FloatingIPList # noqa
from designate.objects.pool_manager_status import PoolManagerStatus, PoolManagerStatusList # noqa
from designate.objects.pool import Pool, PoolList # noqa

View File

@ -15,8 +15,8 @@
from designate.objects.adapters.base import DesignateAdapter # noqa
# API v2
from designate.objects.adapters.api_v2.blacklist import BlacklistAPIv2Adapter, BlacklistListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.domain import DomainAPIv2Adapter, DomainListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.domain_master import DomainMasterAPIv2Adapter, DomainMasterListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.zone import ZoneAPIv2Adapter, ZoneListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.zone_master import ZoneMasterAPIv2Adapter, ZoneMasterListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.floating_ip import FloatingIPAPIv2Adapter, FloatingIPListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.record import RecordAPIv2Adapter, RecordListAPIv2Adapter # noqa
from designate.objects.adapters.api_v2.recordset import RecordSetAPIv2Adapter, RecordSetListAPIv2Adapter # noqa

View File

@ -27,7 +27,7 @@ class RecordSetAPIv2Adapter(base.APIv2Adapter):
'fields': {
"id": {},
"zone_id": {
'rename': 'domain_id'
'rename': 'zone_id'
},
"name": {
'immutable': True

View File

@ -19,9 +19,9 @@ from designate import objects
LOG = logging.getLogger(__name__)
class DomainAPIv2Adapter(base.APIv2Adapter):
class ZoneAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.Domain
ADAPTER_OBJECT = objects.Zone
MODIFICATIONS = {
'fields': {
@ -69,18 +69,18 @@ class DomainAPIv2Adapter(base.APIv2Adapter):
object.masters = objects.adapters.DesignateAdapter.parse(
cls.ADAPTER_FORMAT,
values['masters'],
objects.DomainMasterList(),
objects.ZoneMasterList(),
*args, **kwargs)
del values['masters']
return super(DomainAPIv2Adapter, cls)._parse_object(
return super(ZoneAPIv2Adapter, cls)._parse_object(
values, object, *args, **kwargs)
class DomainListAPIv2Adapter(base.APIv2Adapter):
class ZoneListAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.DomainList
ADAPTER_OBJECT = objects.ZoneList
MODIFICATIONS = {
'options': {

View File

@ -31,7 +31,7 @@ class ZoneExportAPIv2Adapter(base.APIv2Adapter):
"message": {},
"location": {},
"zone_id": {
'rename': 'domain_id',
'rename': 'zone_id',
},
"project_id": {
'rename': 'tenant_id'

View File

@ -30,7 +30,7 @@ class ZoneImportAPIv2Adapter(base.APIv2Adapter):
"status": {},
"message": {},
"zone_id": {
'rename': 'domain_id',
'rename': 'zone_id',
},
"project_id": {
'rename': 'tenant_id'

View File

@ -19,9 +19,9 @@ from designate import utils
LOG = logging.getLogger(__name__)
class DomainMasterAPIv2Adapter(base.APIv2Adapter):
class ZoneMasterAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.DomainMaster
ADAPTER_OBJECT = objects.ZoneMaster
MODIFICATIONS = {
'fields': {
@ -49,9 +49,9 @@ class DomainMasterAPIv2Adapter(base.APIv2Adapter):
return object
class DomainMasterListAPIv2Adapter(base.APIv2Adapter):
class ZoneMasterListAPIv2Adapter(base.APIv2Adapter):
ADAPTER_OBJECT = objects.DomainMasterList
ADAPTER_OBJECT = objects.ZoneMasterList
MODIFICATIONS = {
'options': {

View File

@ -36,7 +36,7 @@ class ZoneTransferAcceptAPIv2Adapter(base.APIv2Adapter):
},
"status": {},
"zone_id": {
'rename': 'domain_id',
'rename': 'zone_id',
},
"created_at": {},
"updated_at": {},

View File

@ -30,7 +30,6 @@ class ZoneTransferRequestAPIv2Adapter(base.APIv2Adapter):
'protected': False
},
"zone_id": {
'rename': 'domain_id',
'immutable': True,
'protected': False
},
@ -50,7 +49,6 @@ class ZoneTransferRequestAPIv2Adapter(base.APIv2Adapter):
'protected': False
},
"zone_name": {
'rename': 'domain_name',
'protected': False
},
"created_at": {},

View File

@ -209,7 +209,7 @@ class DesignateAdapter(object):
##############################################################
# Check if the field should be allowed change after it is
# initially set (eg domain name)
# initially set (eg zone name)
if cls.MODIFICATIONS['fields'][key].get('immutable', False):
if getattr(output_object, obj_key, False) and \
getattr(output_object, obj_key) != value:

View File

@ -26,7 +26,7 @@ class PoolManagerStatus(base.DictObjectMixin, base.PersistentObjectMixin,
},
'required': True
},
'domain_id': {
'zone_id': {
'schema': {
'type': 'string',
'format': 'uuid',
@ -54,7 +54,7 @@ class PoolManagerStatus(base.DictObjectMixin, base.PersistentObjectMixin,
}
STRING_KEYS = [
'id', 'action', 'status', 'server_id', 'domain_id'
'id', 'action', 'status', 'server_id', 'zone_id'
]

View File

@ -28,7 +28,7 @@ class Record(base.DictObjectMixin, base.PersistentObjectMixin,
}
},
'data': {},
'domain_id': {
'zone_id': {
'schema': {
'type': 'string',
'format': 'uuid',

View File

@ -84,7 +84,7 @@ class RecordSet(base.DictObjectMixin, base.PersistentObjectMixin,
},
'read_only': True
},
'domain_id': {
'zone_id': {
'schema': {
'type': 'string',
'description': 'Zone identifier',
@ -277,7 +277,7 @@ class RecordSet(base.DictObjectMixin, base.PersistentObjectMixin,
self.records = old_records
STRING_KEYS = [
'id', 'type', 'name', 'domain_id'
'id', 'type', 'name', 'zone_id'
]

View File

@ -18,8 +18,8 @@ from designate.objects import base
class Tenant(base.DictObjectMixin, base.DesignateObject):
FIELDS = {
'id': {},
'domain_count': {},
'domains': {}
'zone_count': {},
'zones': {}
}
STRING_KEYS = [

View File

@ -19,8 +19,8 @@ from designate.objects.validation_error import ValidationError
from designate.objects.validation_error import ValidationErrorList
class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
base.PersistentObjectMixin, base.DesignateObject):
class Zone(base.DictObjectMixin, base.SoftDeleteObjectMixin,
base.PersistentObjectMixin, base.DesignateObject):
FIELDS = {
'shard': {
'schema': {
@ -93,7 +93,7 @@ class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
},
'read_only': True
},
'parent_domain_id': {
'parent_zone_id': {
'schema': {
'type': ['string', 'null'],
'format': 'uuid'
@ -141,11 +141,11 @@ class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
},
'attributes': {
'relation': True,
'relation_cls': 'DomainAttributeList'
'relation_cls': 'ZoneAttributeList'
},
'masters': {
'relation': True,
'relation_cls': 'DomainMasterList'
'relation_cls': 'ZoneMasterList'
},
'type': {
'schema': {
@ -169,7 +169,7 @@ class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
def get_master_by_ip(self, host):
"""
Utility to get the master by it's ip for this domain.
Utility to get the master by its IP for this zone.
"""
for srv in self.masters:
srv_host, _ = utils.split_host_port(srv.to_data())
@ -224,7 +224,7 @@ class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
errors.append(e)
self._raise(errors)
super(Domain, self).validate()
super(Zone, self).validate()
except exceptions.RelationNotLoaded as ex:
errors = ValidationErrorList()
e = ValidationError()
@ -236,6 +236,6 @@ class Domain(base.DictObjectMixin, base.SoftDeleteObjectMixin,
self._raise(errors)
class DomainList(base.ListObjectMixin, base.DesignateObject,
base.PagedListObjectMixin):
LIST_ITEM_TYPE = Domain
class ZoneList(base.ListObjectMixin, base.DesignateObject,
base.PagedListObjectMixin):
LIST_ITEM_TYPE = Zone

View File

@ -16,18 +16,18 @@
from designate.objects import base
class DomainAttribute(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
class ZoneAttribute(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
FIELDS = {
'domain_id': {},
'zone_id': {},
'key': {},
'value': {}
}
STRING_KEYS = [
'id', 'key', 'value', 'domain_id'
'id', 'key', 'value', 'zone_id'
]
class DomainAttributeList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = DomainAttribute
class ZoneAttributeList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = ZoneAttribute

View File

@ -53,7 +53,7 @@ class ZoneExport(base.DictObjectMixin, base.PersistentObjectMixin,
},
'read_only': True
},
'domain_id': {
'zone_id': {
'schema': {
"type": "string",
"format": "uuid"

View File

@ -46,7 +46,7 @@ class ZoneImport(base.DictObjectMixin, base.PersistentObjectMixin,
},
'read_only': True
},
'domain_id': {
'zone_id': {
'schema': {
"type": "string",
"format": "uuid"

View File

@ -17,10 +17,10 @@ from designate.objects import base
from designate import utils
class DomainMaster(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
class ZoneMaster(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
FIELDS = {
'domain_id': {},
'zone_id': {},
'host': {
'schema': {
'type': 'string',
@ -47,8 +47,8 @@ class DomainMaster(base.DictObjectMixin, base.PersistentObjectMixin,
return cls.from_dict({"host": host, "port": port})
class DomainMasterList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = DomainMaster
class ZoneMasterList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = ZoneMaster
def to_data(self):
rlist = []

View File

@ -46,7 +46,7 @@ class ZoneTransferAccept(base.DictObjectMixin, base.PersistentObjectMixin,
},
'required': True
},
'domain_id': {
'zone_id': {
'schema': {
"type": "string",
"format": "uuid"
@ -56,7 +56,7 @@ class ZoneTransferAccept(base.DictObjectMixin, base.PersistentObjectMixin,
}
STRING_KEYS = [
'id', 'domain_id', 'tenant_id', 'zone_transfer_request_id'
'id', 'zone_id', 'tenant_id', 'zone_transfer_request_id'
]

View File

@ -25,7 +25,7 @@ class ZoneTransferRequest(base.DictObjectMixin, base.PersistentObjectMixin,
"maxLength": 160
},
},
'domain_id': {
'zone_id': {
'schema': {
"type": "string",
"description": "Zone identifier",
@ -57,7 +57,7 @@ class ZoneTransferRequest(base.DictObjectMixin, base.PersistentObjectMixin,
"enum": ["ACTIVE", "PENDING", "DELETED", "ERROR", "COMPLETE"],
}
},
'domain_name': {
'zone_name': {
'schema': {
"type": ["string", "null"],
"maxLength": 255,
@ -67,7 +67,7 @@ class ZoneTransferRequest(base.DictObjectMixin, base.PersistentObjectMixin,
}
STRING_KEYS = [
'id', 'domain_id', 'domain_name', 'target_tenant_id'
'id', 'zone_id', 'zone_name', 'target_tenant_id'
]

View File

@ -31,7 +31,7 @@ OPTS = [
'Pool Manager'),
cfg.IntOpt('threshold-percentage', default=100,
help='The percentage of servers requiring a successful update '
'for a domain change to be considered active'),
'for a zone change to be considered active'),
cfg.IntOpt('poll-timeout', default=30,
help='The time to wait for a response from a server'),
cfg.IntOpt('poll-retry-interval', default=15,

View File

@ -49,7 +49,7 @@ class PoolManagerCache(DriverPlugin):
"""
@abc.abstractmethod
def retrieve(self, context, nameserver_id, domain_id, action):
def retrieve(self, context, nameserver_id, zone_id, action):
"""
Retrieve the pool manager status object.
@ -57,7 +57,7 @@ class PoolManagerCache(DriverPlugin):
:param context: Security context information
:param nameserver_id: the nameserver ID of the pool manager status
object
:param domain_id: the domain ID of the pool manger status object
:param zone_id: the zone ID of the pool manager status object
:param action: the action of the pool manager status object
:return: the pool manager status object
"""

View File

@ -75,10 +75,10 @@ class MemcachePoolManagerCache(cache_base.PoolManagerCache):
serial_number_key, pool_manager_status.serial_number,
self.expiration)
def retrieve(self, context, nameserver_id, domain_id, action):
def retrieve(self, context, nameserver_id, zone_id, action):
values = {
'nameserver_id': nameserver_id,
'domain_id': domain_id,
'zone_id': zone_id,
'action': action,
}
pool_manager_status = objects.PoolManagerStatus(**values)
@ -103,9 +103,9 @@ class MemcachePoolManagerCache(cache_base.PoolManagerCache):
@staticmethod
def _status_key(pool_manager_status, tail):
key = '{nameserver}-{domain}-{action}-{tail}'.format(
key = '{nameserver}-{zone}-{action}-{tail}'.format(
nameserver=pool_manager_status.nameserver_id,
domain=pool_manager_status.domain_id,
zone=pool_manager_status.zone_id,
action=pool_manager_status.action,
tail=tail
)
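The cache key is now composed from the status object's zone_id rather than domain_id. For illustration, with shortened hypothetical ids and the tail value assumed from the serial_number_key usage above, a key comes out as:

    key = '{nameserver}-{zone}-{action}-{tail}'.format(
        nameserver='f278e4a0', zone='a86dba58', action='UPDATE',
        tail='serial_number')
    assert key == 'f278e4a0-a86dba58-UPDATE-serial_number'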

View File

@ -32,5 +32,5 @@ class NoopPoolManagerCache(cache_base.PoolManagerCache):
def store(self, context, pool_manager_status):
pass
def retrieve(self, context, nameserver_id, domain_id, action):
def retrieve(self, context, nameserver_id, zone_id, action):
raise exceptions.PoolManagerStatusNotFound

View File

@ -50,7 +50,7 @@ class SQLAlchemyPoolManagerCache(sqlalchemy_base.SQLAlchemy,
if not pool_manager_status.id:
pool_manager_status = self.retrieve(
context, pool_manager_status.nameserver_id,
pool_manager_status.domain_id, pool_manager_status.action)
pool_manager_status.zone_id, pool_manager_status.action)
self._delete(
context, tables.pool_manager_statuses, pool_manager_status,
exceptions.PoolManagerStatusNotFound)
@ -66,10 +66,10 @@ class SQLAlchemyPoolManagerCache(sqlalchemy_base.SQLAlchemy,
tables.pool_manager_statuses, pool_manager_status,
exceptions.DuplicatePoolManagerStatus)
def retrieve(self, context, nameserver_id, domain_id, action):
def retrieve(self, context, nameserver_id, zone_id, action):
criterion = {
'nameserver_id': nameserver_id,
'domain_id': domain_id,
'zone_id': zone_id,
'action': action
}
return self._find(

View File

@ -0,0 +1,34 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from sqlalchemy.schema import Table, MetaData
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
status_table = Table('pool_manager_statuses', meta, autoload=True)
status_table.c.domain_id.alter(name='zone_id')
def downgrade(migration_engine):
pass

View File

@ -33,7 +33,7 @@ pool_manager_statuses = Table(
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('nameserver_id', UUID, nullable=False),
Column('domain_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column('action', Enum(name='update_actions', *UPDATE_ACTIONS),
nullable=False),
Column('status', Enum(name='update_statuses', *UPDATE_STATUSES),
@ -41,9 +41,9 @@ pool_manager_statuses = Table(
Column('serial_number', Integer, nullable=False),
UniqueConstraint('nameserver_id', 'domain_id', 'action',
UniqueConstraint('nameserver_id', 'zone_id', 'action',
name='unique_pool_manager_status'),
ForeignKeyConstraint(['domain_id'], ['domains.id']),
ForeignKeyConstraint(['zone_id'], ['zones.id']),
mysql_engine='InnoDB',
mysql_charset='utf8',

View File

@ -35,15 +35,16 @@ class PoolManagerAPI(object):
API version history:
1.0 - Initial version
2.0 - Rename domains to zones
"""
RPC_API_VERSION = '1.0'
RPC_API_VERSION = '2.0'
def __init__(self, topic=None):
self.topic = topic if topic else cfg.CONF.pool_manager_topic
target = messaging.Target(topic=self.topic,
version=self.RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.0')
self.client = rpc.get_client(target, version_cap='2.0')
@classmethod
def get_instance(cls):
@ -59,51 +60,51 @@ class PoolManagerAPI(object):
MNGR_API = cls()
return MNGR_API
def create_domain(self, context, domain):
LOG.info(_LI("create_domain: Calling pool manager for %(domain)s, "
def create_zone(self, context, zone):
LOG.info(_LI("create_zone: Calling pool manager for %(zone)s, "
"serial:%(serial)s") %
{'domain': domain.name, 'serial': domain.serial})
{'zone': zone.name, 'serial': zone.serial})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, domain.pool_id)
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'create_domain', domain=domain)
context, 'create_zone', zone=zone)
def delete_domain(self, context, domain):
LOG.info(_LI("delete_domain: Calling pool manager for %(domain)s, "
def delete_zone(self, context, zone):
LOG.info(_LI("delete_zone: Calling pool manager for %(zone)s, "
"serial:%(serial)s") %
{'domain': domain.name, 'serial': domain.serial})
{'zone': zone.name, 'serial': zone.serial})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, domain.pool_id)
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'delete_domain', domain=domain)
context, 'delete_zone', zone=zone)
def update_domain(self, context, domain):
LOG.info(_LI("update_domain: Calling pool manager for %(domain)s, "
def update_zone(self, context, zone):
LOG.info(_LI("update_zone: Calling pool manager for %(zone)s, "
"serial:%(serial)s") %
{'domain': domain.name, 'serial': domain.serial})
{'zone': zone.name, 'serial': zone.serial})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, domain.pool_id)
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'update_domain', domain=domain)
context, 'update_zone', zone=zone)
def update_status(self, context, domain, nameserver, status,
def update_status(self, context, zone, nameserver, status,
actual_serial):
LOG.info(_LI("update_status: Calling pool manager for %(domain)s : "
LOG.info(_LI("update_status: Calling pool manager for %(zone)s : "
"%(action)s : %(status)s : %(serial)s on nameserver "
"'%(host)s:%(port)s'") %
{'domain': domain.name, 'action': domain.action,
{'zone': zone.name, 'action': zone.action,
'status': status, 'serial': actual_serial,
'host': nameserver.host, 'port': nameserver.port})
# Modifying the topic so it is pool manager instance specific.
topic = '%s.%s' % (self.topic, domain.pool_id)
topic = '%s.%s' % (self.topic, zone.pool_id)
cctxt = self.client.prepare(topic=topic)
return cctxt.cast(
context, 'update_status', domain=domain, nameserver=nameserver,
context, 'update_status', zone=zone, nameserver=nameserver,
status=status, actual_serial=actual_serial)
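Every cast in this client is routed with a pool-scoped topic, so only the pool manager instance responsible for the zone's pool picks it up. A compressed sketch of that routing; the base topic and pool id values are illustrative:

    import oslo_messaging as messaging

    from designate import rpc

    def pool_scoped_client(base_topic, pool_id, version='2.0'):
        # e.g. base_topic='pool_manager' plus the zone's pool_id gives the
        # topic 'pool_manager.<pool_id>' used for the prepared cast above.
        target = messaging.Target(topic=base_topic, version=version)
        client = rpc.get_client(target, version_cap=version)
        return client.prepare(topic='%s.%s' % (base_topic, pool_id))

    # cctxt = pool_scoped_client('pool_manager',
    #                            '794ccc2c-d751-44fe-b57f-8894c9f5c842')
    # cctxt.cast(context, 'create_zone', zone=zone)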

View File

@ -71,7 +71,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
1.0 - Initial version
"""
RPC_API_VERSION = '1.0'
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
@ -180,22 +180,22 @@ class Service(service.RPCService, coordination.CoordinationMixin,
try:
# Handle Deletion Failures
domains = self._get_failed_domains(context, DELETE_ACTION)
zones = self._get_failed_zones(context, DELETE_ACTION)
for domain in domains:
self.delete_domain(context, domain)
for zone in zones:
self.delete_zone(context, zone)
# Handle Creation Failures
domains = self._get_failed_domains(context, CREATE_ACTION)
zones = self._get_failed_zones(context, CREATE_ACTION)
for domain in domains:
self.create_domain(context, domain)
for zone in zones:
self.create_zone(context, zone)
# Handle Update Failures
domains = self._get_failed_domains(context, UPDATE_ACTION)
zones = self._get_failed_zones(context, UPDATE_ACTION)
for domain in domains:
self.update_domain(context, domain)
for zone in zones:
self.update_zone(context, zone)
except Exception:
LOG.exception(_LE('An unhandled exception in periodic '
@ -224,14 +224,14 @@ class Service(service.RPCService, coordination.CoordinationMixin,
current = utils.increment_serial()
criterion['serial'] = ">%s" % (current - periodic_sync_seconds)
domains = self.central_api.find_domains(context, criterion)
zones = self.central_api.find_zones(context, criterion)
try:
for domain in domains:
# TODO(kiall): If the domain was created within the last
for zone in zones:
# TODO(kiall): If the zone was created within the last
# periodic_sync_seconds, attempt to recreate
# to fill in targets which may have failed.
self.update_domain(context, domain)
self.update_zone(context, zone)
except Exception:
LOG.exception(_LE('An unhandled exception in periodic '
@ -239,73 +239,73 @@ class Service(service.RPCService, coordination.CoordinationMixin,
# Standard Create/Update/Delete Methods
def create_domain(self, context, domain):
def create_zone(self, context, zone):
"""
:param context: Security context information.
:param domain: Domain to be created
:param zone: Zone to be created
:return: None
"""
LOG.info(_LI("Creating new domain %s"), domain.name)
LOG.info(_LI("Creating new zone %s"), zone.name)
results = []
# Create the domain on each of the Pool Targets
# Create the zone on each of the Pool Targets
for target in self.pool.targets:
results.append(
self._create_domain_on_target(context, target, domain)
self._create_zone_on_target(context, target, zone)
)
if self._exceed_or_meet_threshold(results.count(True)):
LOG.debug('Consensus reached for creating domain %(domain)s '
'on pool targets' % {'domain': domain.name})
LOG.debug('Consensus reached for creating zone %(zone)s '
'on pool targets' % {'zone': zone.name})
else:
LOG.warn(_LW('Consensus not reached for creating domain %(domain)s'
' on pool targets') % {'domain': domain.name})
LOG.warn(_LW('Consensus not reached for creating zone %(zone)s'
' on pool targets') % {'zone': zone.name})
self.central_api.update_status(
context, domain.id, ERROR_STATUS, domain.serial)
context, zone.id, ERROR_STATUS, zone.serial)
return
# Send a NOTIFY to each also-notifies
for also_notify in self.pool.also_notifies:
self._update_domain_on_also_notify(context, also_notify, domain)
self._update_zone_on_also_notify(context, also_notify, zone)
# Send a NOTIFY to each nameserver
for nameserver in self.pool.nameservers:
create_status = self._build_status_object(
nameserver, domain, CREATE_ACTION)
nameserver, zone, CREATE_ACTION)
self.cache.store(context, create_status)
self.mdns_api.poll_for_serial_number(
context, domain, nameserver, self.timeout,
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def _create_domain_on_target(self, context, target, domain):
def _create_zone_on_target(self, context, target, zone):
"""
:param context: Security context information.
:param target: Target to create Domain on
:param domain: Domain to be created
:param target: Target to create Zone on
:param zone: Zone to be created
:return: True/False
"""
LOG.debug("Creating domain %s on target %s", domain.name, target.id)
LOG.debug("Creating zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
retries = 0
while retries < self.max_retries:
try:
backend.create_domain(context, domain)
backend.create_zone(context, zone)
return True
except Exception:
retries += 1
LOG.exception(_LE("Failed to create domain %(domain)s on "
LOG.exception(_LE("Failed to create zone %(zone)s on "
"target %(target)s on attempt %(attempt)d"),
{
'domain': domain.name,
'zone': zone.name,
'target': target.id,
'attempt': retries
})
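The per-target True/False results collected above are reduced to a single verdict by _exceed_or_meet_threshold(). A minimal sketch of that check, assuming the configured threshold is a percentage of the pool's targets:

    # Sketch only: consensus means at least threshold_pct percent of the
    # pool targets reported success for this operation.
    def exceed_or_meet_threshold(success_count, target_count, threshold_pct):
        return success_count >= target_count * (threshold_pct / 100.0)

    assert exceed_or_meet_threshold(2, 3, 66)       # 2 of 3 meets 66%
    assert not exceed_or_meet_threshold(1, 3, 66)   # 1 of 3 does not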
@ -313,140 +313,140 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return False
def update_domain(self, context, domain):
def update_zone(self, context, zone):
"""
:param context: Security context information.
:param domain: Domain to be updated
:param zone: Zone to be updated
:return: None
"""
LOG.info(_LI("Updating domain %s"), domain.name)
LOG.info(_LI("Updating zone %s"), zone.name)
results = []
# Update the domain on each of the Pool Targets
# Update the zone on each of the Pool Targets
for target in self.pool.targets:
results.append(
self._update_domain_on_target(context, target, domain))
self._update_zone_on_target(context, target, zone))
if self._exceed_or_meet_threshold(results.count(True)):
LOG.debug('Consensus reached for updating domain %(domain)s '
'on pool targets' % {'domain': domain.name})
LOG.debug('Consensus reached for updating zone %(zone)s '
'on pool targets' % {'zone': zone.name})
else:
LOG.warn(_LW('Consensus not reached for updating domain %(domain)s'
' on pool targets') % {'domain': domain.name})
LOG.warn(_LW('Consensus not reached for updating zone %(zone)s'
' on pool targets') % {'zone': zone.name})
self.central_api.update_status(
context, domain.id, ERROR_STATUS, domain.serial)
context, zone.id, ERROR_STATUS, zone.serial)
return
# Send a NOTIFY to each also-notifies
for also_notify in self.pool.also_notifies:
self._update_domain_on_also_notify(context, also_notify, domain)
self._update_zone_on_also_notify(context, also_notify, zone)
# Ensure the change has propagated to each nameserver
for nameserver in self.pool.nameservers:
# See if there is already another update in progress
try:
update_status = self.cache.retrieve(
context, nameserver.id, domain.id, UPDATE_ACTION)
context, nameserver.id, zone.id, UPDATE_ACTION)
except exceptions.PoolManagerStatusNotFound:
update_status = self._build_status_object(
nameserver, domain, UPDATE_ACTION)
nameserver, zone, UPDATE_ACTION)
self.cache.store(context, update_status)
self.mdns_api.poll_for_serial_number(
context, domain, nameserver, self.timeout,
context, zone, nameserver, self.timeout,
self.retry_interval, self.max_retries, self.delay)
def _update_domain_on_target(self, context, target, domain):
def _update_zone_on_target(self, context, target, zone):
"""
:param context: Security context information.
:param target: Target to update Domain on
:param domain: Domain to be updated
:param target: Target to update Zone on
:param zone: Zone to be updated
:return: True/False
"""
LOG.debug("Updating domain %s on target %s", domain.name, target.id)
LOG.debug("Updating zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
try:
backend.update_domain(context, domain)
backend.update_zone(context, zone)
return True
except Exception:
LOG.exception(_LE("Failed to update domain %(domain)s on target "
LOG.exception(_LE("Failed to update zone %(zone)s on target "
"%(target)s"),
{'domain': domain.name, 'target': target.id})
{'zone': zone.name, 'target': target.id})
return False
def _update_domain_on_also_notify(self, context, also_notify, domain):
LOG.info(_LI('Updating domain %(domain)s on also_notify %(server)s.') %
{'domain': domain.name,
def _update_zone_on_also_notify(self, context, also_notify, zone):
LOG.info(_LI('Updating zone %(zone)s on also_notify %(server)s.') %
{'zone': zone.name,
'server': self._get_destination(also_notify)})
self.mdns_api.notify_zone_changed(
context, domain, also_notify.host, also_notify.port, self.timeout,
context, zone, also_notify.host, also_notify.port, self.timeout,
self.retry_interval, self.max_retries, 0)
def delete_domain(self, context, domain):
def delete_zone(self, context, zone):
"""
:param context: Security context information.
:param domain: Domain to be deleted
:param zone: Zone to be deleted
:return: None
"""
LOG.info(_LI("Deleting domain %s"), domain.name)
LOG.info(_LI("Deleting zone %s"), zone.name)
results = []
# Delete the domain on each of the Pool Targets
# Delete the zone on each of the Pool Targets
for target in self.pool.targets:
results.append(
self._delete_domain_on_target(context, target, domain))
self._delete_zone_on_target(context, target, zone))
# TODO(kiall): We should monitor that the Domain is actually deleted
# TODO(kiall): We should monitor that the Zone is actually deleted
# correctly on each of the nameservers, rather than
# assuming a successful delete-on-target is OK as we have
# in the past.
if self._exceed_or_meet_threshold(
results.count(True), MAXIMUM_THRESHOLD):
LOG.debug('Consensus reached for deleting domain %(domain)s '
'on pool targets' % {'domain': domain.name})
LOG.debug('Consensus reached for deleting zone %(zone)s '
'on pool targets' % {'zone': zone.name})
self.central_api.update_status(
context, domain.id, SUCCESS_STATUS, domain.serial)
context, zone.id, SUCCESS_STATUS, zone.serial)
else:
LOG.warn(_LW('Consensus not reached for deleting domain %(domain)s'
' on pool targets') % {'domain': domain.name})
LOG.warn(_LW('Consensus not reached for deleting zone %(zone)s'
' on pool targets') % {'zone': zone.name})
self.central_api.update_status(
context, domain.id, ERROR_STATUS, domain.serial)
context, zone.id, ERROR_STATUS, zone.serial)
def _delete_domain_on_target(self, context, target, domain):
def _delete_zone_on_target(self, context, target, zone):
"""
:param context: Security context information.
:param target: Target to delete Domain from
:param domain: Domain to be deleted
:param target: Target to delete Zone from
:param zone: Zone to be deleted
:return: True/False
"""
LOG.debug("Deleting domain %s on target %s", domain.name, target.id)
LOG.debug("Deleting zone %s on target %s", zone.name, target.id)
backend = self.target_backends[target.id]
retries = 0
while retries < self.max_retries:
try:
backend.delete_domain(context, domain)
backend.delete_zone(context, zone)
return True
except Exception:
retries += 1
LOG.exception(_LE("Failed to delete domain %(domain)s on "
LOG.exception(_LE("Failed to delete zone %(zone)s on "
"target %(target)s on attempt %(attempt)d"),
{
'domain': domain.name,
'zone': zone.name,
'target': target.id,
'attempt': retries
})
@ -454,38 +454,38 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return False
def update_status(self, context, domain, nameserver, status,
def update_status(self, context, zone, nameserver, status,
actual_serial):
"""
update_status is called by mdns for creates and updates.
deletes are handled by the backend entirely and status is determined
at the time of delete itself.
:param context: Security context information.
:param domain: The designate domain object.
:param zone: The designate zone object.
:param nameserver: The nameserver for which a status update is being
sent.
:param status: The status, 'SUCCESS' or 'ERROR'.
:param actual_serial: The actual serial number received from the name
server for the domain.
server for the zone.
:return: None
"""
LOG.debug("Calling update_status for %s : %s : %s : %s" %
(domain.name, domain.action, status, actual_serial))
action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
(zone.name, zone.action, status, actual_serial))
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
with lockutils.lock('update-status-%s' % domain.id):
with lockutils.lock('update-status-%s' % zone.id):
try:
current_status = self.cache.retrieve(
context, nameserver.id, domain.id, action)
context, nameserver.id, zone.id, action)
except exceptions.PoolManagerStatusNotFound:
current_status = self._build_status_object(
nameserver, domain, action)
nameserver, zone, action)
self.cache.store(context, current_status)
cache_serial = current_status.serial_number
LOG.debug('For domain %s : %s on nameserver %s the cache serial '
LOG.debug('For zone %s : %s on nameserver %s the cache serial '
'is %s and the actual serial is %s.' %
(domain.name, action,
(zone.name, action,
self._get_destination(nameserver),
cache_serial, actual_serial))
if actual_serial and cache_serial <= actual_serial:
@ -493,51 +493,51 @@ class Service(service.RPCService, coordination.CoordinationMixin,
current_status.serial_number = actual_serial
self.cache.store(context, current_status)
consensus_serial = self._get_consensus_serial(context, domain)
consensus_serial = self._get_consensus_serial(context, zone)
# If there is a valid consensus serial we can still send a success
# for that serial.
# If there is a higher error serial we can also send an error for
# the error serial.
if consensus_serial != 0 and cache_serial <= consensus_serial \
and domain.status != 'ACTIVE':
LOG.info(_LI('For domain %(domain)s '
and zone.status != 'ACTIVE':
LOG.info(_LI('For zone %(zone)s '
'the consensus serial is %(consensus_serial)s.') %
{'domain': domain.name,
{'zone': zone.name,
'consensus_serial': consensus_serial})
self.central_api.update_status(
context, domain.id, SUCCESS_STATUS, consensus_serial)
context, zone.id, SUCCESS_STATUS, consensus_serial)
if status == ERROR_STATUS:
error_serial = self._get_error_serial(
context, domain, consensus_serial)
context, zone, consensus_serial)
if error_serial > consensus_serial or error_serial == 0:
LOG.warn(_LW('For domain %(domain)s '
LOG.warn(_LW('For zone %(zone)s '
'the error serial is %(error_serial)s.') %
{'domain': domain.name,
{'zone': zone.name,
'error_serial': error_serial})
self.central_api.update_status(
context, domain.id, ERROR_STATUS, error_serial)
context, zone.id, ERROR_STATUS, error_serial)
if status == NO_DOMAIN_STATUS and action != DELETE_ACTION:
LOG.warn(_LW('Domain %(domain)s is not present in some '
'targets') % {'domain': domain.name})
LOG.warn(_LW('Zone %(zone)s is not present in some '
'targets') % {'zone': zone.name})
self.central_api.update_status(
context, domain.id, NO_DOMAIN_STATUS, 0)
context, zone.id, NO_DOMAIN_STATUS, 0)
if consensus_serial == domain.serial and self._is_consensus(
context, domain, action, SUCCESS_STATUS,
if consensus_serial == zone.serial and self._is_consensus(
context, zone, action, SUCCESS_STATUS,
MAXIMUM_THRESHOLD):
self._clear_cache(context, domain, action)
self._clear_cache(context, zone, action)
# Utility Methods
def _get_failed_domains(self, context, action):
def _get_failed_zones(self, context, action):
criterion = {
'pool_id': CONF['service:pool_manager'].pool_id,
'action': action,
'status': 'ERROR'
}
return self.central_api.find_domains(context, criterion)
return self.central_api.find_zones(context, criterion)
@staticmethod
def _get_destination(nameserver):
@ -567,10 +567,10 @@ class Service(service.RPCService, coordination.CoordinationMixin,
def _get_serials_descending(self, pool_manager_statuses):
return self._get_sorted_serials(pool_manager_statuses, descending=True)
def _is_consensus(self, context, domain, action, status, threshold=None):
def _is_consensus(self, context, zone, action, status, threshold=None):
status_count = 0
pool_manager_statuses = self._retrieve_statuses(
context, domain, action)
context, zone, action)
for pool_manager_status in pool_manager_statuses:
if pool_manager_status.status == status:
status_count += 1
@ -578,11 +578,11 @@ class Service(service.RPCService, coordination.CoordinationMixin,
threshold = self.threshold
return self._exceed_or_meet_threshold(status_count, threshold)
def _get_consensus_serial(self, context, domain):
def _get_consensus_serial(self, context, zone):
consensus_serial = 0
action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
pm_statuses = self._retrieve_statuses(context, domain, action)
pm_statuses = self._retrieve_statuses(context, zone, action)
for serial in self._get_serials_descending(pm_statuses):
serial_count = 0
for pm_status in pm_statuses:
@ -593,12 +593,12 @@ class Service(service.RPCService, coordination.CoordinationMixin,
break
return consensus_serial
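In other words, the consensus serial is the highest serial that enough of the pool's nameservers have caught up to. A rough standalone sketch, assuming each status carries a serial_number and the same percentage threshold as above:

    # Sketch only: highest serial reached by at least threshold_pct
    # percent of the reported statuses.
    def consensus_serial(serial_numbers, threshold_pct):
        needed = len(serial_numbers) * (threshold_pct / 100.0)
        for serial in sorted(set(serial_numbers), reverse=True):
            if sum(1 for s in serial_numbers if s >= serial) >= needed:
                return serial
        return 0

    assert consensus_serial([5, 5, 4], 66) == 5
    assert consensus_serial([5, 4, 3], 66) == 4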
def _get_error_serial(self, context, domain, consensus_serial):
def _get_error_serial(self, context, zone, consensus_serial):
error_serial = 0
action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
action = UPDATE_ACTION if zone.action == 'NONE' else zone.action
if self._is_consensus(context, domain, action, ERROR_STATUS):
pm_statuses = self._retrieve_statuses(context, domain, action)
if self._is_consensus(context, zone, action, ERROR_STATUS):
pm_statuses = self._retrieve_statuses(context, zone, action)
for serial in self._get_serials_ascending(pm_statuses):
if serial > consensus_serial:
error_serial = serial
@ -608,10 +608,10 @@ class Service(service.RPCService, coordination.CoordinationMixin,
# When we hear back from the nameserver, the serial_number is set to the
# value the nameserver reports.
@staticmethod
def _build_status_object(nameserver, domain, action):
def _build_status_object(nameserver, zone, action):
values = {
'nameserver_id': nameserver.id,
'domain_id': domain.id,
'zone_id': zone.id,
'status': None,
'serial_number': 0,
'action': action
@ -619,9 +619,9 @@ class Service(service.RPCService, coordination.CoordinationMixin,
return objects.PoolManagerStatus(**values)
# Methods for manipulating the cache.
def _clear_cache(self, context, domain, action=None):
LOG.debug('Clearing cache for domain %s with action %s.' %
(domain.name, action))
def _clear_cache(self, context, zone, action=None):
LOG.debug('Clearing cache for zone %s with action %s.' %
(zone.name, action))
pool_manager_statuses = []
if action:
@ -632,7 +632,7 @@ class Service(service.RPCService, coordination.CoordinationMixin,
for nameserver in self.pool.nameservers:
for action in actions:
pool_manager_status = self._build_status_object(
nameserver, domain, action)
nameserver, zone, action)
pool_manager_statuses.append(pool_manager_status)
for pool_manager_status in pool_manager_statuses:
@ -642,22 +642,22 @@ class Service(service.RPCService, coordination.CoordinationMixin,
except exceptions.PoolManagerStatusNotFound:
pass
def _retrieve_from_mdns(self, context, nameserver, domain, action):
def _retrieve_from_mdns(self, context, nameserver, zone, action):
try:
(status, actual_serial, retries) = \
self.mdns_api.get_serial_number(
context, domain, nameserver.host, nameserver.port,
context, zone, nameserver.host, nameserver.port,
self.timeout, self.retry_interval, self.max_retries,
self.delay)
except messaging.MessagingException as msg_ex:
LOG.debug('Could not retrieve status and serial for domain %s on '
LOG.debug('Could not retrieve status and serial for zone %s on '
'nameserver %s with action %s (%s: %s)' %
(domain.name, self._get_destination(nameserver), action,
(zone.name, self._get_destination(nameserver), action,
type(msg_ex), str(msg_ex)))
return None
pool_manager_status = self._build_status_object(
nameserver, domain, action)
nameserver, zone, action)
if status == NO_DOMAIN_STATUS:
if action == CREATE_ACTION:
@ -671,37 +671,37 @@ class Service(service.RPCService, coordination.CoordinationMixin,
pool_manager_status.status = status
pool_manager_status.serial_number = actual_serial \
if actual_serial is not None else 0
LOG.debug('Retrieved status %s and serial %s for domain %s '
LOG.debug('Retrieved status %s and serial %s for zone %s '
'on nameserver %s with action %s from mdns.' %
(pool_manager_status.status,
pool_manager_status.serial_number,
domain.name, self._get_destination(nameserver), action))
zone.name, self._get_destination(nameserver), action))
self.cache.store(context, pool_manager_status)
return pool_manager_status
def _retrieve_statuses(self, context, domain, action):
def _retrieve_statuses(self, context, zone, action):
pool_manager_statuses = []
for nameserver in self.pool.nameservers:
try:
pool_manager_status = self.cache.retrieve(
context, nameserver.id, domain.id, action)
context, nameserver.id, zone.id, action)
LOG.debug('Cache hit! Retrieved status %s and serial %s '
'for domain %s on nameserver %s with action %s from '
'for zone %s on nameserver %s with action %s from '
'the cache.' %
(pool_manager_status.status,
pool_manager_status.serial_number,
domain.name,
zone.name,
self._get_destination(nameserver), action))
except exceptions.PoolManagerStatusNotFound:
LOG.debug('Cache miss! Did not retrieve status and serial '
'for domain %s on nameserver %s with action %s from '
'for zone %s on nameserver %s with action %s from '
'the cache. Getting it from the server.' %
(domain.name,
(zone.name,
self._get_destination(nameserver),
action))
pool_manager_status = self._retrieve_from_mdns(
context, nameserver, domain, action)
context, nameserver, zone, action)
if pool_manager_status is not None:
pool_manager_statuses.append(pool_manager_status)

View File

@ -24,12 +24,12 @@ LOG = logging.getLogger(__name__)
cfg.CONF.register_opts([
cfg.StrOpt('quota-driver', default='storage', help='Quota driver to use'),
cfg.IntOpt('quota-domains', default=10,
help='Number of domains allowed per tenant'),
cfg.IntOpt('quota-domain-recordsets', default=500,
help='Number of recordsets allowed per domain'),
cfg.IntOpt('quota-domain-records', default=500,
help='Number of records allowed per domain'),
cfg.IntOpt('quota-zones', default=10,
help='Number of zones allowed per tenant'),
cfg.IntOpt('quota-zone-recordsets', default=500,
help='Number of recordsets allowed per zone'),
cfg.IntOpt('quota-zone-records', default=500,
help='Number of records allowed per zone'),
cfg.IntOpt('quota-recordset-records', default=20,
help='Number of records allowed per recordset'),
cfg.IntOpt('quota-api-export-size', default=1000,

View File

@ -36,7 +36,8 @@ class Quota(DriverPlugin):
if value >= quotas[resource]:
raise exceptions.OverQuota()
else:
raise exceptions.QuotaResourceUnknown()
raise exceptions.QuotaResourceUnknown("%s is not a valid quota"
" resource", resource)
def get_quotas(self, context, tenant_id):
quotas = self.get_default_quotas(context)
@ -51,9 +52,9 @@ class Quota(DriverPlugin):
def get_default_quotas(self, context):
return {
'domains': cfg.CONF.quota_domains,
'domain_recordsets': cfg.CONF.quota_domain_recordsets,
'domain_records': cfg.CONF.quota_domain_records,
'zones': cfg.CONF.quota_zones,
'zone_recordsets': cfg.CONF.quota_zone_recordsets,
'zone_records': cfg.CONF.quota_zone_records,
'recordset_records': cfg.CONF.quota_recordset_records,
'api_export_size': cfg.CONF.quota_api_export_size,
}
@ -62,7 +63,8 @@ class Quota(DriverPlugin):
quotas = self._get_quotas(context, tenant_id)
if resource not in quotas:
raise exceptions.QuotaResourceUnknown()
raise exceptions.QuotaResourceUnknown("%s is not a valid quota "
"resource", resource)
return quotas[resource]
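The QuotaResourceUnknown above fires whenever a caller asks about a quota the driver does not track; the valid resources are the keys returned by get_default_quotas(). A hedged usage sketch, assuming the driver exposes a limit_check(context, tenant_id, **values) style entry point (wiring and helper name are assumptions, not Designate code):

    # Hypothetical: reject a zone create once a tenant reaches the
    # 'zones' quota.
    from designate import exceptions

    def ensure_zone_quota(quota_driver, context, tenant_id, current_zones):
        try:
            quota_driver.limit_check(context, tenant_id,
                                     zones=current_zones + 1)
        except exceptions.OverQuota:
            # The API layer typically maps this to an HTTP 413.
            raise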

View File

@ -1,5 +1,5 @@
$ORIGIN {{ domain.name }}
$TTL {{ domain.ttl }}
$ORIGIN {{ zone.name }}
$TTL {{ zone.ttl }}
{% for recordset in recordsets -%}
{{recordset[0]}} {{recordset[1] or ''}} IN {{recordset[2]}} {{recordset[3]}}

View File

@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
# "\Z", rather than simply "$" to ensure a string with a
# trailing newline is NOT matched. See bug #1471158.
RE_DOMAINNAME = r'^(?!.{255,})(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-)\.)+\Z'
RE_ZONENAME = r'^(?!.{255,})(?:(?!\-)[A-Za-z0-9_\-]{1,63}(?<!\-)\.)+\Z'
RE_HOSTNAME = r'^(?!.{255,})(?:(?:^\*|(?!\-)[A-Za-z0-9_\-]{1,63})(?<!\-)\.)+\Z'
RE_SRV_HOST_NAME = r'^(?:(?!\-)(?:\_[A-Za-z0-9_\-]{1,63}\.){2})(?!.{255,})' \
@ -108,7 +108,7 @@ def is_ip_or_host(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_DOMAINNAME, instance)\
if not re.match(RE_ZONENAME, instance)\
and not is_ipv4(instance)\
and not is_ipv6(instance):
return False
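The \Z anchor called out in the comment above matters because Python's $ also matches just before a trailing newline; a quick illustration of the difference:

    import re

    # '$' would accept a name smuggling a trailing newline; '\Z' rejects it.
    assert re.match(r'^example\.org\.$', 'example.org.\n')
    assert re.match(r'^example\.org\.\Z', 'example.org.\n') is None
    assert re.match(r'^example\.org\.\Z', 'example.org.')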
@ -118,11 +118,13 @@ def is_ip_or_host(instance):
@draft3_format_checker.checks("domain-name")
@draft4_format_checker.checks("domainname")
def is_domainname(instance):
@draft3_format_checker.checks("zone-name")
@draft4_format_checker.checks("zonename")
def is_zonename(instance):
if not isinstance(instance, compat.str_types):
return True
if not re.match(RE_DOMAINNAME, instance):
if not re.match(RE_ZONENAME, instance):
return False
return True
@ -163,7 +165,7 @@ def is_email(instance):
rname = instance.replace('@', '.', 1)
if not re.match(RE_DOMAINNAME, "%s." % rname):
if not re.match(RE_ZONENAME, "%s." % rname):
return False
return True
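Because the registered format names change here as well ("domain-name"/"domainname" become "zone-name"/"zonename"), any JSON schema that referenced the old formats has to follow suit. A hedged sketch of how a draft-4 validator consults such a checker (the toy rule is illustrative only, not Designate's regex):

    import jsonschema

    checker = jsonschema.FormatChecker()

    @checker.checks("zonename")
    def looks_like_zone(value):
        # Toy rule: absolute names end with a trailing dot.
        return isinstance(value, str) and value.endswith(".")

    schema = {"properties": {"name": {"type": "string",
                                      "format": "zonename"}}}
    jsonschema.validate({"name": "example.org."}, schema,
                        format_checker=checker)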

View File

@ -322,7 +322,7 @@ class SQLAlchemy(object):
table.c.created_at, # 2 - RS Created
table.c.updated_at, # 3 - RS Updated
table.c.tenant_id, # 4 - RS Tenant
table.c.domain_id, # 5 - RS Domain
table.c.zone_id, # 5 - RS Zone
table.c.name, # 6 - RS Name
table.c.type, # 7 - RS Type
table.c.ttl, # 8 - RS TTL
@ -333,7 +333,7 @@ class SQLAlchemy(object):
relation_table.c.created_at, # 12 - R Created
relation_table.c.updated_at, # 13 - R Updated
relation_table.c.tenant_id, # 14 - R Tenant
relation_table.c.domain_id, # 15 - R Domain
relation_table.c.zone_id, # 15 - R Zone
relation_table.c.recordset_id, # 16 - R RSet
relation_table.c.data, # 17 - R Data
relation_table.c.description, # 18 - R Desc
@ -364,7 +364,7 @@ class SQLAlchemy(object):
"created_at": 2,
"updated_at": 3,
"tenant_id": 4,
"domain_id": 5,
"zone_id": 5,
"name": 6,
"type": 7,
"ttl": 8,
@ -377,7 +377,7 @@ class SQLAlchemy(object):
"created_at": 12,
"updated_at": 13,
"tenant_id": 14,
"domain_id": 15,
"zone_id": 15,
"recordset_id": 16,
"data": 17,
"description": 18,

View File

@ -227,28 +227,28 @@ class Storage(DriverPlugin):
"""
@abc.abstractmethod
def create_domain(self, context, domain):
def create_zone(self, context, zone):
"""
Create a new Domain.
Create a new Zone.
:param context: RPC Context.
:param domain: Domain object with the values to be created.
:param zone: Zone object with the values to be created.
"""
@abc.abstractmethod
def get_domain(self, context, domain_id):
def get_zone(self, context, zone_id):
"""
Get a Domain via its ID.
Get a Zone via its ID.
:param context: RPC Context.
:param domain_id: ID of the Domain.
:param zone_id: ID of the Zone.
"""
@abc.abstractmethod
def find_domains(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
def find_zones(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
"""
Find Domains
Find zones
:param context: RPC Context.
:param criterion: Criteria to filter by.
@ -261,57 +261,57 @@ class Storage(DriverPlugin):
"""
@abc.abstractmethod
def find_domain(self, context, criterion):
def find_zone(self, context, criterion):
"""
Find a single Domain.
Find a single Zone.
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def update_domain(self, context, domain):
def update_zone(self, context, zone):
"""
Update a Domain
Update a Zone
:param context: RPC Context.
:param domain: Domain object.
:param zone: Zone object.
"""
@abc.abstractmethod
def delete_domain(self, context, domain_id):
def delete_zone(self, context, zone_id):
"""
Delete a Domain
Delete a Zone
:param context: RPC Context.
:param domain_id: Domain ID to delete.
:param zone_id: Zone ID to delete.
"""
@abc.abstractmethod
def purge_domain(self, context, zone):
def purge_zone(self, context, zone):
"""
Purge a Domain
Purge a Zone
:param context: RPC Context.
:param domain: Zone to delete.
:param zone: Zone to delete.
"""
@abc.abstractmethod
def count_domains(self, context, criterion=None):
def count_zones(self, context, criterion=None):
"""
Count domains
Count zones
:param context: RPC Context.
:param criterion: Criteria to filter by.
"""
@abc.abstractmethod
def create_recordset(self, context, domain_id, recordset):
def create_recordset(self, context, zone_id, recordset):
"""
Create a recordset on a given Domain ID
Create a recordset on a given Zone ID
:param context: RPC Context.
:param domain_id: Domain ID to create the recordset in.
:param zone_id: Zone ID to create the recordset in.
:param recordset: RecordSet object with the values to be created.
"""
@ -386,12 +386,12 @@ class Storage(DriverPlugin):
"""
@abc.abstractmethod
def create_record(self, context, domain_id, recordset_id, record):
def create_record(self, context, zone_id, recordset_id, record):
"""
Create a record on a given Domain ID
Create a record on a given Zone ID
:param context: RPC Context.
:param domain_id: Domain ID to create the record in.
:param zone_id: Zone ID to create the record in.
:param recordset_id: RecordSet ID to create the record in.
:param record: Record object with the values to be created.
"""

View File

@ -32,7 +32,7 @@ from designate.storage.impl_sqlalchemy import tables
LOG = logging.getLogger(__name__)
MAXIMUM_SUBDOMAIN_DEPTH = 128
MAXIMUM_SUBZONE_DEPTH = 128
cfg.CONF.register_group(cfg.OptGroup(
name='storage:sqlalchemy', title="Configuration for SQLAlchemy Storage"
@ -51,7 +51,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
def get_name(self):
return self.name
# CRUD for our resources (quota, server, tsigkey, tenant, domain & record)
# CRUD for our resources (quota, server, tsigkey, tenant, zone & record)
# R - get_*, find_*s
#
# Standard Arguments
@ -172,18 +172,18 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
# Tenant Methods
##
def find_tenants(self, context):
# returns an array of tenant_id & count of their domains
query = select([tables.domains.c.tenant_id,
func.count(tables.domains.c.id)])
query = self._apply_tenant_criteria(context, tables.domains, query)
query = self._apply_deleted_criteria(context, tables.domains, query)
query = query.group_by(tables.domains.c.tenant_id)
# returns an array of tenant_id & count of their zones
query = select([tables.zones.c.tenant_id,
func.count(tables.zones.c.id)])
query = self._apply_tenant_criteria(context, tables.zones, query)
query = self._apply_deleted_criteria(context, tables.zones, query)
query = query.group_by(tables.zones.c.tenant_id)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
tenant_list = objects.TenantList(
objects=[objects.Tenant(id=t[0], domain_count=t[1]) for t in
objects=[objects.Tenant(id=t[0], zone_count=t[1]) for t in
results])
tenant_list.obj_reset_changes()
@ -191,26 +191,26 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
return tenant_list
def get_tenant(self, context, tenant_id):
# get list list & count of all domains owned by given tenant_id
query = select([tables.domains.c.name])
query = self._apply_tenant_criteria(context, tables.domains, query)
query = self._apply_deleted_criteria(context, tables.domains, query)
query = query.where(tables.domains.c.tenant_id == tenant_id)
# get list & count of all zones owned by given tenant_id
query = select([tables.zones.c.name])
query = self._apply_tenant_criteria(context, tables.zones, query)
query = self._apply_deleted_criteria(context, tables.zones, query)
query = query.where(tables.zones.c.tenant_id == tenant_id)
resultproxy = self.session.execute(query)
results = resultproxy.fetchall()
return objects.Tenant(
id=tenant_id,
domain_count=len(results),
domains=[r[0] for r in results])
zone_count=len(results),
zones=[r[0] for r in results])
def count_tenants(self, context):
# tenants are the owner of domains, count the number of unique tenants
# select count(distinct tenant_id) from domains
query = select([func.count(distinct(tables.domains.c.tenant_id))])
query = self._apply_tenant_criteria(context, tables.domains, query)
query = self._apply_deleted_criteria(context, tables.domains, query)
# tenants are the owners of zones, count the number of unique tenants
# select count(distinct tenant_id) from zones
query = select([func.count(distinct(tables.zones.c.tenant_id))])
query = self._apply_tenant_criteria(context, tables.zones, query)
query = self._apply_deleted_criteria(context, tables.zones, query)
resultproxy = self.session.execute(query)
result = resultproxy.fetchone()
@ -221,95 +221,95 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
return result[0]
##
# Domain Methods
# Zone Methods
##
def _find_domains(self, context, criterion, one=False, marker=None,
limit=None, sort_key=None, sort_dir=None):
def _find_zones(self, context, criterion, one=False, marker=None,
limit=None, sort_key=None, sort_dir=None):
# Check to see if the criterion can use the reverse_name column
criterion = self._rname_check(criterion)
domains = self._find(
context, tables.domains, objects.Domain, objects.DomainList,
exceptions.DomainNotFound, criterion, one, marker, limit,
zones = self._find(
context, tables.zones, objects.Zone, objects.ZoneList,
exceptions.ZoneNotFound, criterion, one, marker, limit,
sort_key, sort_dir)
def _load_relations(domain):
if domain.type == 'SECONDARY':
domain.masters = self._find_domain_masters(
context, {'domain_id': domain.id})
def _load_relations(zone):
if zone.type == 'SECONDARY':
zone.masters = self._find_zone_masters(
context, {'zone_id': zone.id})
else:
# This avoids an extra DB call per primary zone. This will
# always have 0 results for a PRIMARY zone.
domain.masters = objects.DomainMasterList()
zone.masters = objects.ZoneMasterList()
domain.attributes = self._find_domain_attributes(
context, {'domain_id': domain.id, "key": "!master"})
zone.attributes = self._find_zone_attributes(
context, {'zone_id': zone.id, "key": "!master"})
domain.obj_reset_changes(['masters', 'attributes'])
zone.obj_reset_changes(['masters', 'attributes'])
if one:
_load_relations(domains)
_load_relations(zones)
else:
domains.total_count = self.count_domains(context, criterion)
for d in domains:
zones.total_count = self.count_zones(context, criterion)
for d in zones:
_load_relations(d)
return domains
return zones
def create_domain(self, context, domain):
def create_zone(self, context, zone):
# Patch in the reverse_name column
extra_values = {"reverse_name": domain.name[::-1]}
extra_values = {"reverse_name": zone.name[::-1]}
# Don't handle recordsets for now
domain = self._create(
tables.domains, domain, exceptions.DuplicateDomain,
zone = self._create(
tables.zones, zone, exceptions.DuplicateZone,
['attributes', 'recordsets', 'masters'],
extra_values=extra_values)
if domain.obj_attr_is_set('attributes'):
for attrib in domain.attributes:
self.create_domain_attribute(context, domain.id, attrib)
if zone.obj_attr_is_set('attributes'):
for attrib in zone.attributes:
self.create_zone_attribute(context, zone.id, attrib)
else:
domain.attributes = objects.DomainAttributeList()
if domain.obj_attr_is_set('masters'):
for master in domain.masters:
self.create_domain_master(context, domain.id, master)
zone.attributes = objects.ZoneAttributeList()
if zone.obj_attr_is_set('masters'):
for master in zone.masters:
self.create_zone_master(context, zone.id, master)
else:
domain.masters = objects.DomainMasterList()
domain.obj_reset_changes(['masters', 'attributes'])
zone.masters = objects.ZoneMasterList()
zone.obj_reset_changes(['masters', 'attributes'])
return domain
return zone
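The reverse_name value patched in above (and consulted through _rname_check when building criteria) appears to exist so that suffix lookups on zone names can be served by an ordinary index: a suffix match on name becomes a prefix match on reverse_name. A small illustration of the idea, not Designate code:

    # Illustration only: reversed names turn suffix lookups into
    # index-friendly prefix lookups.
    name = "www.example.org."
    reverse_name = name[::-1]           # ".gro.elpmaxe.www"

    suffix = "example.org."
    like_pattern = suffix[::-1] + "%"   # would back a LIKE query
    assert reverse_name.startswith(suffix[::-1])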
def get_domain(self, context, domain_id):
domain = self._find_domains(context, {'id': domain_id}, one=True)
return domain
def get_zone(self, context, zone_id):
zone = self._find_zones(context, {'id': zone_id}, one=True)
return zone
def find_domains(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
domains = self._find_domains(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
return domains
def find_zones(self, context, criterion=None, marker=None, limit=None,
sort_key=None, sort_dir=None):
zones = self._find_zones(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
return zones
def find_domain(self, context, criterion):
domain = self._find_domains(context, criterion, one=True)
return domain
def find_zone(self, context, criterion):
zone = self._find_zones(context, criterion, one=True)
return zone
def update_domain(self, context, domain):
def update_zone(self, context, zone):
tenant_id_changed = False
if 'tenant_id' in domain.obj_what_changed():
if 'tenant_id' in zone.obj_what_changed():
tenant_id_changed = True
# Don't handle recordsets for now
updated_domain = self._update(
context, tables.domains, domain, exceptions.DuplicateDomain,
exceptions.DomainNotFound,
updated_zone = self._update(
context, tables.zones, zone, exceptions.DuplicateZone,
exceptions.ZoneNotFound,
['attributes', 'recordsets', 'masters'])
if domain.obj_attr_is_set('attributes'):
if zone.obj_attr_is_set('attributes'):
# Gather the Attribute ID's we have
have = set([r.id for r in self._find_domain_attributes(
context, {'domain_id': domain.id})])
have = set([r.id for r in self._find_zone_attributes(
context, {'zone_id': zone.id})])
# Prep some lists of changes
keep = set([])
@ -317,7 +317,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
update = []
# Determine what to change
for i in domain.attributes:
for i in zone.attributes:
keep.add(i.id)
try:
i.obj_get_original_value('id')
@ -329,27 +329,27 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
# NOTE: Since we're dealing with mutable objects, the return value
# of create/update/delete attribute is not needed.
# The original item will be mutated in place on the input
# "domain.attributes" list.
# "zone.attributes" list.
# Delete Attributes
for i_id in have - keep:
attr = self._find_domain_attributes(
attr = self._find_zone_attributes(
context, {'id': i_id}, one=True)
self.delete_domain_attribute(context, attr.id)
self.delete_zone_attribute(context, attr.id)
# Update Attributes
for i in update:
self.update_domain_attribute(context, i)
self.update_zone_attribute(context, i)
# Create Attributes
for attr in create:
attr.domain_id = domain.id
self.create_domain_attribute(context, domain.id, attr)
attr.zone_id = zone.id
self.create_zone_attribute(context, zone.id, attr)
if domain.obj_attr_is_set('masters'):
if zone.obj_attr_is_set('masters'):
# Gather the Attribute ID's we have
have = set([r.id for r in self._find_domain_masters(
context, {'domain_id': domain.id})])
have = set([r.id for r in self._find_zone_masters(
context, {'zone_id': zone.id})])
# Prep some lists of changes
keep = set([])
@ -357,7 +357,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
update = []
# Determine what to change
for i in domain.masters:
for i in zone.masters:
keep.add(i.id)
try:
i.obj_get_original_value('id')
@ -369,32 +369,32 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
# NOTE: Since we're dealing with mutable objects, the return value
# of create/update/delete attribute is not needed.
# The original item will be mutated in place on the input
# "domain.attributes" list.
# "zone.attributes" list.
# Delete Attributes
for i_id in have - keep:
attr = self._find_domain_masters(
attr = self._find_zone_masters(
context, {'id': i_id}, one=True)
self.delete_domain_master(context, attr.id)
self.delete_zone_master(context, attr.id)
# Update Attributes
for i in update:
self.update_domain_master(context, i)
self.update_zone_master(context, i)
# Create Attributes
for attr in create:
attr.domain_id = domain.id
self.create_domain_master(context, domain.id, attr)
attr.zone_id = zone.id
self.create_zone_master(context, zone.id, attr)
if domain.obj_attr_is_set('recordsets'):
existing = self.find_recordsets(context, {'domain_id': domain.id})
if zone.obj_attr_is_set('recordsets'):
existing = self.find_recordsets(context, {'zone_id': zone.id})
data = {}
for rrset in existing:
data[rrset.name, rrset.type] = rrset
keep = set()
for rrset in domain.recordsets:
for rrset in zone.recordsets:
current = data.get((rrset.name, rrset.type))
if current:
@ -403,67 +403,67 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
self.update_recordset(context, current)
keep.add(current.id)
else:
self.create_recordset(context, domain.id, rrset)
self.create_recordset(context, zone.id, rrset)
keep.add(rrset.id)
if domain.type == 'SECONDARY':
if zone.type == 'SECONDARY':
# Purge anything that shouldn't be there :P
for i in set([i.id for i in data.values()]) - keep:
self.delete_recordset(context, i)
if tenant_id_changed:
recordsets_query = tables.recordsets.update().\
where(tables.recordsets.c.domain_id == domain.id)\
.values({'tenant_id': domain.tenant_id})
where(tables.recordsets.c.zone_id == zone.id)\
.values({'tenant_id': zone.tenant_id})
records_query = tables.records.update().\
where(tables.records.c.domain_id == domain.id).\
values({'tenant_id': domain.tenant_id})
where(tables.records.c.zone_id == zone.id).\
values({'tenant_id': zone.tenant_id})
self.session.execute(records_query)
self.session.execute(recordsets_query)
return updated_domain
return updated_zone
def delete_domain(self, context, domain_id):
def delete_zone(self, context, zone_id):
"""
"""
# Fetch the existing domain, we'll need to return it.
domain = self._find_domains(context, {'id': domain_id}, one=True)
return self._delete(context, tables.domains, domain,
exceptions.DomainNotFound)
# Fetch the existing zone, we'll need to return it.
zone = self._find_zones(context, {'id': zone_id}, one=True)
return self._delete(context, tables.zones, zone,
exceptions.ZoneNotFound)
def purge_domain(self, context, zone):
def purge_zone(self, context, zone):
"""Effectively remove a zone database record.
"""
return self._delete(context, tables.domains, zone,
exceptions.DomainNotFound, hard_delete=True)
return self._delete(context, tables.zones, zone,
exceptions.ZoneNotFound, hard_delete=True)
def _walk_up_domains(self, current, zones_by_id):
def _walk_up_zones(self, current, zones_by_id):
"""Walk upwards in a zone hierarchy until we find a parent zone
that does not belong to "zones_by_id"
:returns: parent zone ID or None
"""
max_steps = MAXIMUM_SUBDOMAIN_DEPTH
while current.parent_domain_id in zones_by_id:
current = zones_by_id[current.parent_domain_id]
max_steps = MAXIMUM_SUBZONE_DEPTH
while current.parent_zone_id in zones_by_id:
current = zones_by_id[current.parent_zone_id]
max_steps -= 1
if max_steps == 0:
raise exceptions.IllegalParentDomain("Loop detected in the"
" domain hierarchy")
raise exceptions.IllegalParentZone("Loop detected in the"
" zone hierarchy")
return current.parent_domain_id
return current.parent_zone_id
def purge_domains(self, context, criterion, limit):
def purge_zones(self, context, criterion, limit):
"""Purge deleted zones.
Reparent orphan children, if any.
Transactions/locks are not needed.
:returns: number of purged domains
:returns: number of purged zones
"""
if 'deleted' in criterion:
context.show_deleted = True
zones = self.find_domains(
zones = self.find_zones(
context=context,
criterion=criterion,
limit=limit,
@ -479,24 +479,24 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
for zone in zones:
# Reparent child zones, if any.
surviving_parent_id = self._walk_up_domains(zone, zones_by_id)
query = tables.domains.update().\
where(tables.domains.c.parent_domain_id == zone.id).\
values(parent_domain_id=surviving_parent_id)
surviving_parent_id = self._walk_up_zones(zone, zones_by_id)
query = tables.zones.update().\
where(tables.zones.c.parent_zone_id == zone.id).\
values(parent_zone_id=surviving_parent_id)
resultproxy = self.session.execute(query)
LOG.debug(_LI("%d child zones updated"), resultproxy.rowcount)
self.purge_domain(context, zone)
self.purge_zone(context, zone)
LOG.info(_LI("Purged %d zones"), len(zones))
return len(zones)
def count_domains(self, context, criterion=None):
query = select([func.count(tables.domains.c.id)])
query = self._apply_criterion(tables.domains, query, criterion)
query = self._apply_tenant_criteria(context, tables.domains, query)
query = self._apply_deleted_criteria(context, tables.domains, query)
def count_zones(self, context, criterion=None):
query = select([func.count(tables.zones.c.id)])
query = self._apply_criterion(tables.zones, query, criterion)
query = self._apply_tenant_criteria(context, tables.zones, query)
query = self._apply_deleted_criteria(context, tables.zones, query)
resultproxy = self.session.execute(query)
result = resultproxy.fetchone()
@ -506,112 +506,112 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
return result[0]
# Domain attribute methods
def _find_domain_attributes(self, context, criterion, one=False,
marker=None, limit=None, sort_key=None,
sort_dir=None):
return self._find(context, tables.domain_attributes,
objects.DomainAttribute, objects.DomainAttributeList,
exceptions.DomainAttributeNotFound, criterion, one,
# Zone attribute methods
def _find_zone_attributes(self, context, criterion, one=False,
marker=None, limit=None, sort_key=None,
sort_dir=None):
return self._find(context, tables.zone_attributes,
objects.ZoneAttribute, objects.ZoneAttributeList,
exceptions.ZoneAttributeNotFound, criterion, one,
marker, limit, sort_key, sort_dir)
def create_domain_attribute(self, context, domain_id, domain_attribute):
domain_attribute.domain_id = domain_id
return self._create(tables.domain_attributes, domain_attribute,
exceptions.DuplicateDomainAttribute)
def create_zone_attribute(self, context, zone_id, zone_attribute):
zone_attribute.zone_id = zone_id
return self._create(tables.zone_attributes, zone_attribute,
exceptions.DuplicateZoneAttribute)
def get_domain_attributes(self, context, domain_attribute_id):
return self._find_domain_attributes(
context, {'id': domain_attribute_id}, one=True)
def get_zone_attributes(self, context, zone_attribute_id):
return self._find_zone_attributes(
context, {'id': zone_attribute_id}, one=True)
def find_domain_attributes(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self._find_domain_attributes(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_zone_attributes(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self._find_zone_attributes(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain_attribute(self, context, criterion):
return self._find_domain_attributes(context, criterion, one=True)
def find_zone_attribute(self, context, criterion):
return self._find_zone_attributes(context, criterion, one=True)
def update_domain_attribute(self, context, domain_attribute):
return self._update(context, tables.domain_attributes,
domain_attribute,
exceptions.DuplicateDomainAttribute,
exceptions.DomainAttributeNotFound)
def update_zone_attribute(self, context, zone_attribute):
return self._update(context, tables.zone_attributes,
zone_attribute,
exceptions.DuplicateZoneAttribute,
exceptions.ZoneAttributeNotFound)
def delete_domain_attribute(self, context, domain_attribute_id):
domain_attribute = self._find_domain_attributes(
context, {'id': domain_attribute_id}, one=True)
deleted_domain_attribute = self._delete(
context, tables.domain_attributes, domain_attribute,
exceptions.DomainAttributeNotFound)
def delete_zone_attribute(self, context, zone_attribute_id):
zone_attribute = self._find_zone_attributes(
context, {'id': zone_attribute_id}, one=True)
deleted_zone_attribute = self._delete(
context, tables.zone_attributes, zone_attribute,
exceptions.ZoneAttributeNotFound)
return deleted_domain_attribute
return deleted_zone_attribute
# Domain master methods
def _find_domain_masters(self, context, criterion, one=False,
marker=None, limit=None, sort_key=None,
sort_dir=None):
# Zone master methods
def _find_zone_masters(self, context, criterion, one=False,
marker=None, limit=None, sort_key=None,
sort_dir=None):
criterion['key'] = 'master'
attribs = self._find(context, tables.domain_attributes,
objects.DomainAttribute,
objects.DomainAttributeList,
exceptions.DomainMasterNotFound,
attribs = self._find(context, tables.zone_attributes,
objects.ZoneAttribute,
objects.ZoneAttributeList,
exceptions.ZoneMasterNotFound,
criterion, one,
marker, limit, sort_key, sort_dir)
masters = objects.DomainMasterList()
masters = objects.ZoneMasterList()
for attrib in attribs:
masters.append(objects.DomainMaster().from_data(attrib.value))
masters.append(objects.ZoneMaster().from_data(attrib.value))
return masters
def create_domain_master(self, context, domain_id, domain_master):
def create_zone_master(self, context, zone_id, zone_master):
domain_attribute = objects.DomainAttribute()
domain_attribute.domain_id = domain_id
domain_attribute.key = 'master'
domain_attribute.value = domain_master.to_data()
zone_attribute = objects.ZoneAttribute()
zone_attribute.zone_id = zone_id
zone_attribute.key = 'master'
zone_attribute.value = zone_master.to_data()
return self._create(tables.domain_attributes, domain_attribute,
exceptions.DuplicateDomainAttribute)
return self._create(tables.zone_attributes, zone_attribute,
exceptions.DuplicateZoneAttribute)
def get_domain_masters(self, context, domain_attribute_id):
return self._find_domain_masters(
context, {'id': domain_attribute_id}, one=True)
def get_zone_masters(self, context, zone_attribute_id):
return self._find_zone_masters(
context, {'id': zone_attribute_id}, one=True)
def find_domain_masters(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self._find_domain_masters(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_zone_masters(self, context, criterion=None, marker=None,
limit=None, sort_key=None, sort_dir=None):
return self._find_zone_masters(context, criterion, marker=marker,
limit=limit, sort_key=sort_key,
sort_dir=sort_dir)
def find_domain_master(self, context, criterion):
return self._find_domain_master(context, criterion, one=True)
def find_zone_master(self, context, criterion):
return self._find_zone_master(context, criterion, one=True)
def update_domain_master(self, context, domain_master):
def update_zone_master(self, context, zone_master):
domain_attribute = objects.DomainAttribute()
domain_attribute.domain_id = domain_master.domain_id
domain_attribute.key = 'master'
domain_attribute.value = domain_master.to_data()
zone_attribute = objects.ZoneAttribute()
zone_attribute.zone_id = zone_master.zone_id
zone_attribute.key = 'master'
zone_attribute.value = zone_master.to_data()
return self._update(context, tables.domain_attributes,
domain_attribute,
exceptions.DuplicateDomainAttribute,
exceptions.DomainAttributeNotFound)
return self._update(context, tables.zone_attributes,
zone_attribute,
exceptions.DuplicateZoneAttribute,
exceptions.ZoneAttributeNotFound)
def delete_domain_master(self, context, domain_master_id):
domain_attribute = self._find_domain_attributes(
context, {'id': domain_master_id}, one=True)
deleted_domain_attribute = self._delete(
context, tables.domain_attributes, domain_attribute,
exceptions.DomainAttributeNotFound)
def delete_zone_master(self, context, zone_master_id):
zone_attribute = self._find_zone_attributes(
context, {'id': zone_master_id}, one=True)
deleted_zone_attribute = self._delete(
context, tables.zone_attributes, zone_attribute,
exceptions.ZoneAttributeNotFound)
return deleted_domain_attribute
return deleted_zone_attribute
# RecordSet Methods
def _find_recordsets(self, context, criterion, one=False, marker=None,
@ -621,17 +621,17 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
criterion = self._rname_check(criterion)
if criterion is not None \
and not criterion.get('domains_deleted', True):
# remove 'domains_deleted' from the criterion, as _apply_criterion
and not criterion.get('zones_deleted', True):
# remove 'zones_deleted' from the criterion, as _apply_criterion
# assumes each key in criterion to be a column name.
del criterion['domains_deleted']
del criterion['zones_deleted']
if one:
rjoin = tables.recordsets.join(
tables.domains,
tables.recordsets.c.domain_id == tables.domains.c.id)
tables.zones,
tables.recordsets.c.zone_id == tables.zones.c.id)
query = select([tables.recordsets]).select_from(rjoin).\
where(tables.domains.c.deleted == '0')
where(tables.zones.c.deleted == '0')
recordsets = self._find(
context, tables.recordsets, objects.RecordSet,
@ -678,12 +678,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
return raw_rows
def create_recordset(self, context, domain_id, recordset):
# Fetch the domain as we need the tenant_id
domain = self._find_domains(context, {'id': domain_id}, one=True)
def create_recordset(self, context, zone_id, recordset):
# Fetch the zone as we need the tenant_id
zone = self._find_zones(context, {'id': zone_id}, one=True)
recordset.tenant_id = domain.tenant_id
recordset.domain_id = domain_id
recordset.tenant_id = zone.tenant_id
recordset.zone_id = zone_id
# Patch in the reverse_name column
extra_values = {"reverse_name": recordset.name[::-1]}
@ -697,7 +697,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
# NOTE: Since we're dealing with a mutable object, the return
# value is not needed. The original item will be mutated
# in place on the input "recordset.records" list.
self.create_record(context, domain_id, recordset.id, record)
self.create_record(context, zone_id, recordset.id, record)
else:
recordset.records = objects.RecordList()
@ -777,7 +777,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
# Create Records
for record in create_records:
self.create_record(
context, recordset.domain_id, recordset.id, record)
context, recordset.zone_id, recordset.id, record)
return recordset
@ -792,12 +792,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
def count_recordsets(self, context, criterion=None):
# Ensure that we return only active recordsets
rjoin = tables.recordsets.join(
tables.domains,
tables.recordsets.c.domain_id == tables.domains.c.id)
tables.zones,
tables.recordsets.c.zone_id == tables.zones.c.id)
query = select([func.count(tables.recordsets.c.id)]).\
select_from(rjoin).\
where(tables.domains.c.deleted == '0')
where(tables.zones.c.deleted == '0')
query = self._apply_criterion(tables.recordsets, query, criterion)
query = self._apply_tenant_criteria(context, tables.recordsets, query)
@ -829,12 +829,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
return md5.hexdigest()
def create_record(self, context, domain_id, recordset_id, record):
# Fetch the domain as we need the tenant_id
domain = self._find_domains(context, {'id': domain_id}, one=True)
def create_record(self, context, zone_id, recordset_id, record):
# Fetch the zone as we need the tenant_id
zone = self._find_zones(context, {'id': zone_id}, one=True)
record.tenant_id = domain.tenant_id
record.domain_id = domain_id
record.tenant_id = zone.tenant_id
record.zone_id = zone_id
record.recordset_id = recordset_id
record.hash = self._recalculate_record_hash(record)
@ -870,12 +870,12 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
def count_records(self, context, criterion=None):
# Ensure that we return only active records
rjoin = tables.records.join(
tables.domains,
tables.records.c.domain_id == tables.domains.c.id)
tables.zones,
tables.records.c.zone_id == tables.zones.c.id)
query = select([func.count(tables.records.c.id)]).\
select_from(rjoin).\
where(tables.domains.c.deleted == '0')
where(tables.zones.c.deleted == '0')
query = self._apply_criterion(tables.records, query, criterion)
query = self._apply_tenant_criteria(context, tables.records, query)
@ -1180,11 +1180,11 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
table = tables.zone_transfer_requests
ljoin = tables.zone_transfer_requests.join(
tables.domains,
tables.zone_transfer_requests.c.domain_id == tables.domains.c.id)
tables.zones,
tables.zone_transfer_requests.c.zone_id == tables.zones.c.id)
query = select(
[table, tables.domains.c.name.label("domain_name")]
[table, tables.zones.c.name.label("zone_name")]
).select_from(ljoin)
if not context.all_tenants:
@ -1204,7 +1204,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
def create_zone_transfer_request(self, context, zone_transfer_request):
try:
criterion = {"domain_id": zone_transfer_request.domain_id,
criterion = {"zone_id": zone_transfer_request.zone_id,
"status": "ACTIVE"}
self.find_zone_transfer_request(
context, criterion)
@ -1239,7 +1239,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
def update_zone_transfer_request(self, context, zone_transfer_request):
zone_transfer_request.obj_reset_changes(('domain_name'))
zone_transfer_request.obj_reset_changes(('zone_name'))
updated_zt_request = self._update(
context,
@ -1247,7 +1247,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
zone_transfer_request,
exceptions.DuplicateZoneTransferRequest,
exceptions.ZoneTransferRequestNotFound,
skip_values=['domain_name'])
skip_values=['zone_name'])
return updated_zt_request
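
For reference, a reduced, runnable version of the join-and-count pattern used above, written against the renamed tables. This is a sketch only: it assumes the SQLAlchemy 1.x API Designate used at the time, and the in-memory engine, cut-down columns and sample rows are stand-ins for illustration.

from sqlalchemy import (MetaData, Table, Column, String, create_engine,
                        func, select)

engine = create_engine('sqlite://')
metadata = MetaData()

zones = Table('zones', metadata,
              Column('id', String(36), primary_key=True),
              Column('deleted', String(32), nullable=False, default='0'))
recordsets = Table('recordsets', metadata,
                   Column('id', String(36), primary_key=True),
                   Column('zone_id', String(36), nullable=False))
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(zones.insert().values(id='z1', deleted='0'))
    # Any non-'0' value in "deleted" excludes the zone from the counts below.
    conn.execute(zones.insert().values(id='z2', deleted='z2'))
    conn.execute(recordsets.insert().values(id='r1', zone_id='z1'))
    conn.execute(recordsets.insert().values(id='r2', zone_id='z2'))

    # Join recordsets to their zone and count only rows whose zone is live,
    # the same shape as count_recordsets() after the rename.
    rjoin = recordsets.join(zones, recordsets.c.zone_id == zones.c.id)
    query = select([func.count(recordsets.c.id)]).\
        select_from(rjoin).\
        where(zones.c.deleted == '0')
    print(conn.execute(query).scalar())  # -> 1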

View File

@ -0,0 +1,139 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Graham Hayes <graham.hayes@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from sqlalchemy.schema import MetaData, Table, Index
from migrate.changeset.constraint import UniqueConstraint, \
ForeignKeyConstraint, PathNotFoundError
# This migration removes all references to "domain" from our database.
# We rename the domains and domain_attributes tables, and rename any columns
# that had "domain" in the name.
# There is a follow-on patch to recreate the FKs for the newly renamed
# tables, as the lib we use doesn't seem to like creating FKs on renamed
# tables until after the migration is complete.
meta = MetaData()
def index_exists(index):
table = index[1]._get_table()
cols = sorted([str(x).split('.')[1] for x in index[1:]])
for idx in table.indexes:
if sorted(idx.columns.keys()) == cols:
return True
return False
def drop_index(index):
if index_exists(index):
index = Index(*index)
index.drop()
def drop_foreign_key(fk_def):
table = fk_def[0]._get_table()
col = fk_def[0]
ref_col = fk_def[1]
# Use .copy() to avoid the set changing during the for operation
for fk in table.foreign_keys.copy():
# Check if the fk is the one we want
if fk.column == col and fk.parent == ref_col:
fkc = ForeignKeyConstraint([fk.column], [fk.parent],
name=fk.constraint.name)
fkc.drop()
# Check if the fk is the one we want (sometimes the parent
# / col appear to be switched)
if fk.parent == col and fk.column == ref_col:
fkc = ForeignKeyConstraint([fk.parent], [fk.column],
name=fk.constraint.name)
fkc.drop()
def drop_unique_constraint(uc_def):
uc = UniqueConstraint(*uc_def[2], table=uc_def[0], name=uc_def[1])
try:
uc.drop()
except PathNotFoundError:
pass
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Get all the tables
domains_table = Table('domains', meta, autoload=True)
domain_attrib_table = Table('domain_attributes', meta, autoload=True)
recordsets_table = Table('recordsets', meta, autoload=True)
records_table = Table('records', meta, autoload=True)
ztr_table = Table('zone_transfer_requests', meta, autoload=True)
zta_table = Table('zone_transfer_accepts', meta, autoload=True)
zt_table = Table('zone_tasks', meta, autoload=True)
# Remove the affected FKs
# Define FKs
fks = [
[domains_table.c.id, domains_table.c.parent_domain_id],
[domain_attrib_table.c.domain_id,
domains_table.c.id],
[recordsets_table.c.domain_id, domains_table.c.id],
[records_table.c.domain_id, domains_table.c.id],
[ztr_table.c.domain_id, domains_table.c.id],
[zta_table.c.domain_id, domains_table.c.id]
]
# Drop FKs
for fk in fks:
drop_foreign_key(fk)
# Change the table structures
# Domains Table changes
domains_table.c.parent_domain_id.alter(name='parent_zone_id')
domains_table.rename('zones')
# Domain Attributes
domain_attrib_table.c.domain_id.alter(name='zone_id')
domain_attrib_table.rename('zone_attributes')
# Recordsets
recordsets_table.c.domain_id.alter(name='zone_id')
recordsets_table.c.domain_shard.alter(name='zone_shard')
# Records
records_table.c.domain_id.alter(name="zone_id")
records_table.c.domain_shard.alter(name="zone_shard")
# Zone Transfer Requests
ztr_table.c.domain_id.alter(name='zone_id')
# Zone Transfer Accepts
zta_table.c.domain_id.alter(name='zone_id')
# Zone Tasks
zt_table.c.domain_id.alter(name='zone_id')
def downgrade(migration_engine):
pass
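
The column and table renames above depend on the .alter() and .rename() helpers that sqlalchemy-migrate adds to SQLAlchemy's Column and Table objects once its changeset package has been imported (the constraint import at the top of this migration pulls that in). A stand-alone sketch of the same two-step pattern against a scratch SQLite database; the cut-down table is illustrative only.

from sqlalchemy import MetaData, Table, Column, String, create_engine
import migrate.changeset  # noqa: installs .alter() / .rename() on Table/Column

engine = create_engine('sqlite://')
meta = MetaData(bind=engine)

domains = Table('domains', meta,
                Column('id', String(36), primary_key=True),
                Column('parent_domain_id', String(36), nullable=True))
domains.create()

# Same shape as the migration: rename the column first, then the table.
domains.c.parent_domain_id.alter(name='parent_zone_id')
domains.rename('zones')

zones = Table('zones', MetaData(bind=engine), autoload=True)
print(zones.columns.keys())  # ['id', 'parent_zone_id']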

View File

@ -0,0 +1,66 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Graham Hayes <graham.hayes@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# See https://blueprints.launchpad.net/nova/+spec/backportable-db-migrations
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
from migrate.changeset.constraint import ForeignKeyConstraint
from sqlalchemy.schema import MetaData, Table
# This migration adds back the FKs removed in migration 80, as sqlalchemy
# migrate seems to need to wait to add FKs to renamed tables.
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Get all the tables
zones_table = Table('zones', meta, autoload=True)
zone_attrib_table = Table('zone_attributes', meta, autoload=True)
recordsets_table = Table('recordsets', meta, autoload=True)
records_table = Table('records', meta, autoload=True)
ztr_table = Table('zone_transfer_requests', meta, autoload=True)
zta_table = Table('zone_transfer_accepts', meta, autoload=True)
zt_table = Table('zone_tasks', meta, autoload=True)
# Create new FKs
fks = []
fks.append(ForeignKeyConstraint([zones_table.c.parent_zone_id],
[zones_table.c.id], ondelete='SET NULL'))
fks.append(ForeignKeyConstraint([zone_attrib_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([recordsets_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([records_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([ztr_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([zta_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
fks.append(ForeignKeyConstraint([zt_table.c.zone_id],
[zones_table.c.id], ondelete='CASCADE'))
for fk in fks:
fk.create()
def downgrade(migration_engine):
pass
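
One way to confirm the foreign keys really came back after running this migration is SQLAlchemy's runtime inspector. The connection URL below is an assumption; point it at whatever database the Designate migrations were run against.

from sqlalchemy import create_engine, inspect

engine = create_engine('mysql+pymysql://designate:designate@localhost/designate')
inspector = inspect(engine)

for table in ('zones', 'zone_attributes', 'recordsets', 'records',
              'zone_transfer_requests', 'zone_transfer_accepts', 'zone_tasks'):
    fks = inspector.get_foreign_keys(table)
    # Each entry names the local column(s) and the table they now point at.
    print(table, [(fk['constrained_columns'], fk['referred_table'])
                  for fk in fks])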

View File

@ -76,7 +76,7 @@ tlds = Table('tlds', metadata,
mysql_charset='utf8',
)
domains = Table('domains', metadata,
zones = Table('zones', metadata,
Column('id', UUID, default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
@ -104,22 +104,22 @@ domains = Table('domains', metadata,
nullable=False),
Column('status', Enum(name='resource_statuses', *RESOURCE_STATUSES),
nullable=False, server_default='PENDING', default='PENDING'),
Column('parent_domain_id', UUID, default=None, nullable=True),
Column('parent_zone_id', UUID, default=None, nullable=True),
Column('action', Enum(name='actions', *ACTIONS),
default='CREATE', server_default='CREATE', nullable=False),
Column('pool_id', UUID, default=None, nullable=True),
Column('reverse_name', String(255), nullable=False),
UniqueConstraint('name', 'deleted', 'pool_id', name='unique_domain_name'),
ForeignKeyConstraint(['parent_domain_id'],
['domains.id'],
UniqueConstraint('name', 'deleted', 'pool_id', name='unique_zone_name'),
ForeignKeyConstraint(['parent_zone_id'],
['zones.id'],
ondelete='SET NULL'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
domain_attributes = Table('domain_attributes', metadata,
zone_attributes = Table('zone_attributes', metadata,
Column('id', UUID(), default=utils.generate_uuid, primary_key=True),
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
@ -127,10 +127,10 @@ domain_attributes = Table('domain_attributes', metadata,
Column('key', Enum(name='key', *ZONE_ATTRIBUTE_KEYS)),
Column('value', String(255), nullable=False),
Column('domain_id', UUID(), nullable=False),
Column('zone_id', UUID(), nullable=False),
UniqueConstraint('key', 'value', 'domain_id', name='unique_attributes'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
UniqueConstraint('key', 'value', 'zone_id', name='unique_attributes'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
mysql_engine='INNODB',
mysql_charset='utf8'
@ -141,19 +141,19 @@ recordsets = Table('recordsets', metadata,
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('domain_shard', SmallInteger(), nullable=False,
default=lambda ctxt: default_shard(ctxt, 'domain_id')),
Column('zone_shard', SmallInteger(), nullable=False,
default=lambda ctxt: default_shard(ctxt, 'zone_id')),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column('name', String(255), nullable=False),
Column('type', Enum(name='record_types', *RECORD_TYPES), nullable=False),
Column('ttl', Integer, default=None, nullable=True),
Column('description', Unicode(160), nullable=True),
Column('reverse_name', String(255), nullable=False, default=''),
UniqueConstraint('domain_id', 'name', 'type', name='unique_recordset'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
UniqueConstraint('zone_id', 'name', 'type', name='unique_recordset'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8',
@ -164,11 +164,11 @@ records = Table('records', metadata,
Column('version', Integer(), default=1, nullable=False),
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('domain_shard', SmallInteger(), nullable=False,
default=lambda ctxt: default_shard(ctxt, 'domain_id')),
Column('zone_shard', SmallInteger(), nullable=False,
default=lambda ctxt: default_shard(ctxt, 'zone_id')),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column('recordset_id', UUID, nullable=False),
Column('data', Text, nullable=False),
Column('description', Unicode(160), nullable=True),
@ -189,7 +189,7 @@ records = Table('records', metadata,
Column('serial', Integer(), server_default='1', nullable=False),
UniqueConstraint('hash', name='unique_record'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
ForeignKeyConstraint(['recordset_id'], ['recordsets.id'],
ondelete='CASCADE'),
@ -282,7 +282,7 @@ zone_transfer_requests = Table('zone_transfer_requests', metadata,
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('domain_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column("key", String(255), nullable=False),
Column("description", String(255), nullable=False),
Column("tenant_id", String(36), default=None, nullable=False),
@ -291,7 +291,7 @@ zone_transfer_requests = Table('zone_transfer_requests', metadata,
nullable=False, server_default='ACTIVE',
default='ACTIVE'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
mysql_engine='InnoDB',
mysql_charset='utf8',
@ -303,14 +303,14 @@ zone_transfer_accepts = Table('zone_transfer_accepts', metadata,
Column('created_at', DateTime, default=lambda: timeutils.utcnow()),
Column('updated_at', DateTime, onupdate=lambda: timeutils.utcnow()),
Column('domain_id', UUID, nullable=False),
Column('zone_id', UUID, nullable=False),
Column('zone_transfer_request_id', UUID, nullable=False),
Column("tenant_id", String(36), default=None, nullable=False),
Column("status", Enum(name='resource_statuses', *TASK_STATUSES),
nullable=False, server_default='ACTIVE',
default='ACTIVE'),
ForeignKeyConstraint(['domain_id'], ['domains.id'], ondelete='CASCADE'),
ForeignKeyConstraint(['zone_id'], ['zones.id'], ondelete='CASCADE'),
ForeignKeyConstraint(
['zone_transfer_request_id'],
['zone_transfer_requests.id'],
@ -327,7 +327,7 @@ zone_tasks = Table('zone_tasks', metadata,
Column('version', Integer(), default=1, nullable=False),
Column('tenant_id', String(36), default=None, nullable=True),
Column('domain_id', UUID(), nullable=True),
Column('zone_id', UUID(), nullable=True),
Column('task_type', Enum(name='task_types', *ZONE_TASK_TYPES),
nullable=True),
Column('message', String(160), nullable=True),
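
The recordsets, records and zone_transfer_* tables above all declare their zone_id foreign key with ondelete='CASCADE', so removing a zone row takes its child rows with it at the database level. A reduced illustration, assuming the SQLAlchemy 1.x API; SQLite needs foreign-key enforcement switched on per connection.

from sqlalchemy import (MetaData, Table, Column, String, ForeignKeyConstraint,
                        create_engine, event, func, select)

engine = create_engine('sqlite://')

@event.listens_for(engine, 'connect')
def _enable_sqlite_fks(dbapi_conn, connection_record):
    # SQLite only enforces FK constraints when this pragma is on.
    cursor = dbapi_conn.cursor()
    cursor.execute('PRAGMA foreign_keys=ON')
    cursor.close()

metadata = MetaData()
zones = Table('zones', metadata,
              Column('id', String(36), primary_key=True))
recordsets = Table('recordsets', metadata,
                   Column('id', String(36), primary_key=True),
                   Column('zone_id', String(36), nullable=False),
                   ForeignKeyConstraint(['zone_id'], ['zones.id'],
                                        ondelete='CASCADE'))
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(zones.insert().values(id='z1'))
    conn.execute(recordsets.insert().values(id='r1', zone_id='z1'))
    conn.execute(zones.delete().where(zones.c.id == 'z1'))
    count = conn.execute(
        select([func.count()]).select_from(recordsets)).scalar()
    print(count)  # -> 0, the recordset was removed along with its zone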

View File

@ -62,7 +62,7 @@ class TestTimeoutError(Exception):
class TestCase(base.BaseTestCase):
quota_fixtures = [{
'resource': 'domains',
'resource': 'zones',
'hard_limit': 5,
}, {
'resource': 'records',
@ -108,8 +108,8 @@ class TestCase(base.BaseTestCase):
'resource_id': '7fbb6304-5e74-4691-bd80-cef3cff5fe2f',
}]
# The last domain is invalid
domain_fixtures = {
# The last zone is invalid
zone_fixtures = {
'PRIMARY': [
{
'name': 'example.com.',
@ -179,8 +179,8 @@ class TestCase(base.BaseTestCase):
{'data': '10 1 5060 server2.example.org.'},
],
'CNAME': [
{'data': 'www.somedomain.org.'},
{'data': 'www.someotherdomain.com.'},
{'data': 'www.somezone.org.'},
{'data': 'www.someotherzone.com.'},
]
}
@ -247,17 +247,17 @@ class TestCase(base.BaseTestCase):
zone_import_fixtures = [{
'status': 'PENDING',
'domain_id': None,
'zone_id': None,
'message': None,
'task_type': 'IMPORT'
}, {
'status': 'ERROR',
'domain_id': None,
'zone_id': None,
'message': None,
'task_type': 'IMPORT'
}, {
'status': 'COMPLETE',
'domain_id': '6ca6baef-3305-4ad0-a52b-a82df5752b62',
'zone_id': '6ca6baef-3305-4ad0-a52b-a82df5752b62',
'message': None,
'task_type': 'IMPORT'
}]
@ -431,16 +431,16 @@ class TestCase(base.BaseTestCase):
_values.update(values)
return _values
def get_domain_fixture(self, domain_type=None, fixture=0, values=None):
domain_type = domain_type or 'PRIMARY'
def get_zone_fixture(self, zone_type=None, fixture=0, values=None):
zone_type = zone_type or 'PRIMARY'
_values = copy.copy(self.domain_fixtures[domain_type][fixture])
_values = copy.copy(self.zone_fixtures[zone_type][fixture])
if values:
_values.update(values)
return _values
def get_recordset_fixture(self, domain_name, type='A', fixture=0,
def get_recordset_fixture(self, zone_name, type='A', fixture=0,
values=None):
values = values or {}
@ -448,7 +448,7 @@ class TestCase(base.BaseTestCase):
_values.update(values)
try:
_values['name'] = _values['name'] % domain_name
_values['name'] = _values['name'] % zone_name
except TypeError:
pass
@ -569,34 +569,34 @@ class TestCase(base.BaseTestCase):
return self.central_service.create_tsigkey(
context, objects.TsigKey.from_dict(values))
def create_domain(self, **kwargs):
def create_zone(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
domain_type = kwargs.pop('type', None)
zone_type = kwargs.pop('type', None)
values = self.get_domain_fixture(domain_type=domain_type,
fixture=fixture, values=kwargs)
values = self.get_zone_fixture(zone_type=zone_type,
fixture=fixture, values=kwargs)
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
return self.central_service.create_domain(
context, objects.Domain.from_dict(values))
return self.central_service.create_zone(
context, objects.Zone.from_dict(values))
def create_recordset(self, domain, type='A', increment_serial=True,
def create_recordset(self, zone, type='A', increment_serial=True,
**kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_recordset_fixture(domain['name'], type=type,
values = self.get_recordset_fixture(zone['name'], type=type,
fixture=fixture,
values=kwargs)
return self.central_service.create_recordset(
context, domain['id'], objects.RecordSet.from_dict(values),
context, zone['id'], objects.RecordSet.from_dict(values),
increment_serial=increment_serial)
def create_record(self, domain, recordset, increment_serial=True,
def create_record(self, zone, recordset, increment_serial=True,
**kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
@ -605,7 +605,7 @@ class TestCase(base.BaseTestCase):
values=kwargs)
return self.central_service.create_record(
context, domain['id'], recordset['id'],
context, zone['id'], recordset['id'],
objects.Record.from_dict(values),
increment_serial=increment_serial)
@ -645,15 +645,15 @@ class TestCase(base.BaseTestCase):
context, default_pool_id,
objects.PoolAttribute.from_dict(values))
def create_zone_transfer_request(self, domain, **kwargs):
def create_zone_transfer_request(self, zone, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_zone_transfer_request_fixture(
fixture=fixture, values=kwargs)
if 'domain_id' not in values:
values['domain_id'] = domain.id
if 'zone_id' not in values:
values['zone_id'] = zone.id
return self.central_service.create_zone_transfer_request(
context, objects.ZoneTransferRequest.from_dict(values))
@ -669,8 +669,8 @@ class TestCase(base.BaseTestCase):
if 'zone_transfer_request_id' not in values:
values['zone_transfer_request_id'] = zone_transfer_request.id
if 'domain_id' not in values:
values['domain_id'] = zone_transfer_request.domain_id
if 'zone_id' not in values:
values['zone_id'] = zone_transfer_request.zone_id
if 'key' not in values:
values['key'] = zone_transfer_request.key
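
A hedged sketch of how a functional test consumes the renamed helpers above. The TestCase base class and fixtures are the ones defined in this file; the test class, record data and assertions are illustrative only.

from designate.tests import TestCase


class ExampleZoneTest(TestCase):

    def test_zone_with_recordset_and_record(self):
        # Fixture 0 of the PRIMARY zone fixtures is 'example.com.'.
        zone = self.create_zone()

        # The recordset fixture name is interpolated with the zone name.
        recordset = self.create_recordset(zone, type='A')
        record = self.create_record(zone, recordset, data='192.0.2.1')

        self.assertEqual(zone['id'], recordset['zone_id'])
        self.assertEqual(recordset['id'], record['recordset_id'])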

View File

@ -37,28 +37,28 @@ class Bind9AgentBackendTestCase(TestCase, BackendTestMixin):
self.backend.agent_service.stop()
self.backend.stop()
def test_find_domain_serial(self):
self.backend.find_domain_serial('example.org.')
def test_find_zone_serial(self):
self.backend.find_zone_serial('example.org.')
@mock.patch('designate.utils.execute')
@mock.patch(('designate.backend.agent_backend.impl_bind9.Bind9Backend'
'._sync_domain'))
def test_create_domain(self, execute, sync):
domain = self._create_dnspy_zone('example.org')
self.backend.create_domain(domain)
'._sync_zone'))
def test_create_zone(self, execute, sync):
zone = self._create_dnspy_zone('example.org')
self.backend.create_zone(zone)
@mock.patch('designate.utils.execute')
@mock.patch(('designate.backend.agent_backend.impl_bind9.Bind9Backend'
'._sync_domain'))
def test_update_domain(self, execute, sync):
domain = self._create_dnspy_zone('example.org')
self.backend.update_domain(domain)
'._sync_zone'))
def test_update_zone(self, execute, sync):
zone = self._create_dnspy_zone('example.org')
self.backend.update_zone(zone)
@mock.patch('designate.utils.execute')
@mock.patch(('designate.backend.agent_backend.impl_bind9.Bind9Backend'
'._sync_domain'))
def test_delete_domain(self, execute, sync):
self.backend.delete_domain('example.org.')
'._sync_zone'))
def test_delete_zone(self, execute, sync):
self.backend.delete_zone('example.org.')
# Helper
def _create_dnspy_zone(self, name):

View File

@ -40,8 +40,8 @@ class DenominatorAgentBackendTestCase(TestCase, BackendTestMixin):
@mock.patch('designate.utils.execute', return_value=(
'example.org SOA 86400 ns1.designate.com. '
'hostmaster@example.org. 475 3600 600 604800 1800', None))
def test_find_domain_serial(self, execute):
serial = self.backend.find_domain_serial('example.org.')
def test_find_zone_serial(self, execute):
serial = self.backend.find_zone_serial('example.org.')
# Ensure returned right serial number
self.assertEqual(475, serial)
@ -51,22 +51,22 @@ class DenominatorAgentBackendTestCase(TestCase, BackendTestMixin):
self.assertIn('get', execute.call_args[0])
@mock.patch('designate.utils.execute', return_value=('', None))
def test_find_domain_serial_fail(self, execute):
serial = self.backend.find_domain_serial('example.org.')
def test_find_zone_serial_fail(self, execute):
serial = self.backend.find_zone_serial('example.org.')
self.assertIsNone(serial)
@mock.patch('designate.utils.execute', return_value=(None, None))
def test_create_domain(self, execute):
domain = self._create_dnspy_zone('example.org.')
self.backend.create_domain(domain)
def test_create_zone(self, execute):
zone = self._create_dnspy_zone('example.org.')
self.backend.create_zone(zone)
# Ensure denominator called for each record (except SOA)
# plus one to update zone data
self.assertEqual(len(list(domain.iterate_rdatas())),
self.assertEqual(len(list(zone.iterate_rdatas())),
execute.call_count)
@mock.patch('designate.utils.execute')
def test_update_domain(self, execute):
def test_update_zone(self, execute):
# Output from 'designate record list' command
records = ('example.org SOA 86400 ns1.designate.com. '
'hostmaster@example.org. 475 3600 600 604800 1800\n'
@ -74,12 +74,12 @@ class DenominatorAgentBackendTestCase(TestCase, BackendTestMixin):
'example.org NS 86400 ns2.designator.net.\n'
'example.org MX 86400 10 mx1.designator.net.')
# That should force update_domain to delete A and AAAA records
# That should force update_zone to delete A and AAAA records
# from the zone and create a new MX record.
execute.return_value = (records, None)
domain = self._create_dnspy_zone('example.org.')
self.backend.update_domain(domain)
zone = self._create_dnspy_zone('example.org.')
self.backend.update_zone(zone)
# Ensure denominator called to:
# *update zone info
@ -99,7 +99,7 @@ class DenominatorAgentBackendTestCase(TestCase, BackendTestMixin):
setattr(self.backend.denominator, method, mock.Mock(
return_value=records))
self.backend.update_domain(domain)
self.backend.update_zone(zone)
self.assertEqual(1, self.backend.denominator.update_zone.call_count)
self.assertEqual(1, self.backend.denominator.get_records.call_count)
self.assertEqual(4, self.backend.denominator.create_record.call_count)
@ -107,8 +107,8 @@ class DenominatorAgentBackendTestCase(TestCase, BackendTestMixin):
self.assertEqual(1, self.backend.denominator.delete_record.call_count)
@mock.patch('designate.utils.execute', return_value=(None, None))
def test_delete_domain(self, execute):
self.backend.delete_domain('example.org.')
def test_delete_zone(self, execute):
self.backend.delete_zone('example.org.')
# Ensure called 'denominator zone delete'
self.assertEqual(1, execute.call_count)
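
The pattern these tests lean on: designate.utils.execute is patched so the backend never shells out to the denominator CLI, and the faked (stdout, stderr) tuple drives its behaviour. A reduced sketch; the record listing and the arguments passed to execute are placeholders.

import mock  # on Python 3 this could be: from unittest import mock

fake_records = ('example.org SOA 86400 ns1.designate.com. '
                'hostmaster@example.org. 475 3600 600 604800 1800\n'
                'example.org NS 86400 ns1.designator.net.')

with mock.patch('designate.utils.execute',
                return_value=(fake_records, None)) as execute:
    from designate import utils

    # The arguments here are placeholders; the mock accepts anything and
    # simply hands back the canned output above.
    stdout, stderr = utils.execute('denominator', 'zone', 'list')

    assert stdout.splitlines()[0].startswith('example.org SOA')
    assert execute.call_count == 1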

View File

@ -36,19 +36,19 @@ class FakeAgentBackendTestCase(TestCase, BackendTestMixin):
self.backend.agent_service.stop()
self.backend.stop()
def test_find_domain_serial(self):
self.backend.find_domain_serial('example.org.')
def test_find_zone_serial(self):
self.backend.find_zone_serial('example.org.')
def test_create_domain(self):
domain = self._create_dnspy_zone('example.org')
self.backend.create_domain(domain)
def test_create_zone(self):
zone = self._create_dnspy_zone('example.org')
self.backend.create_zone(zone)
def test_update_domain(self):
domain = self._create_dnspy_zone('example.org')
self.backend.update_domain(domain)
def test_update_zone(self):
zone = self._create_dnspy_zone('example.org')
self.backend.update_zone(zone)
def test_delete_domain(self):
self.backend.delete_domain('example.org.')
def test_delete_zone(self):
self.backend.delete_zone('example.org.')
# Helper
def _create_dnspy_zone(self, name):

View File

@ -106,7 +106,7 @@ class AgentRequestHandlerTest(AgentTestCase):
request.environ = {'addr': ["0.0.0.0", 1234]}
with mock.patch.object(
designate.backend.agent_backend.impl_fake.FakeBackend,
'find_domain_serial', return_value=None):
'find_zone_serial', return_value=None):
response = next(self.handler(request)).to_wire()
self.assertEqual(expected_response, binascii.b2a_hex(response))
@ -205,7 +205,7 @@ class AgentRequestHandlerTest(AgentTestCase):
request.environ = {'addr': ["0.0.0.0", 1234]}
with mock.patch.object(
designate.backend.agent_backend.impl_fake.FakeBackend,
'find_domain_serial', return_value=None):
'find_zone_serial', return_value=None):
response = next(self.handler(request)).to_wire()
doaxfr.assert_called_with('example.com.', [], source="1.2.3.4")
self.assertEqual(expected_response, binascii.b2a_hex(response))

View File

@ -46,8 +46,8 @@ class AdminApiQuotasTest(AdminApiTestCase):
max_zones = response.json['quota']['zones']
max_zone_records = response.json['quota']['zone_records']
self.assertEqual(cfg.CONF.quota_domains, max_zones)
self.assertEqual(cfg.CONF.quota_domain_records, max_zone_records)
self.assertEqual(cfg.CONF.quota_zones, max_zones)
self.assertEqual(cfg.CONF.quota_zone_records, max_zone_records)
def test_patch_quotas(self):
self.policy({'set_quotas': '@'})

View File

@ -45,10 +45,10 @@ class AdminApiReportsTest(AdminApiTestCase):
self.assertEqual(0, response.json['counts'][0]['records'])
self.assertEqual(0, response.json['counts'][0]['tenants'])
# Add a domain and check the counts
self.create_domain()
# Add a zone and check the counts
self.create_zone()
response = self.client.get('/reports/counts')
# Should be one domain
# Should be one zone
self.assertEqual(1, response.json['counts'][0]['zones'])
# Should be 1 NS and 1 SOA records
self.assertEqual(2, response.json['counts'][0]['records'])
@ -56,7 +56,7 @@ class AdminApiReportsTest(AdminApiTestCase):
self.assertEqual(1, response.json['counts'][0]['tenants'])
def test_get_counts_zones(self):
self.policy({'count_domains': '@'})
self.policy({'count_zones': '@'})
response = self.client.get('/reports/counts/zones')
self.assertEqual(200, response.status_int)
@ -67,9 +67,9 @@ class AdminApiReportsTest(AdminApiTestCase):
self.assertEqual(0, response.json['counts'][0]['zones'])
# Create 2 domains
self.create_domain(fixture=0)
self.create_domain(fixture=1)
# Create 2 zones
self.create_zone(fixture=0)
self.create_zone(fixture=1)
response = self.client.get('/reports/counts/zones')
@ -87,8 +87,8 @@ class AdminApiReportsTest(AdminApiTestCase):
self.assertEqual(0, response.json['counts'][0]['records'])
# Create a domain
self.create_domain()
# Create a zone
self.create_zone()
response = self.client.get('/reports/counts/records')
@ -107,8 +107,8 @@ class AdminApiReportsTest(AdminApiTestCase):
self.assertEqual(0, response.json['counts'][0]['tenants'])
# Create a domain
self.create_domain()
# Create a zone
self.create_zone()
response = self.client.get('/reports/counts/tenants')
@ -117,7 +117,7 @@ class AdminApiReportsTest(AdminApiTestCase):
def test_get_tenants(self):
self.policy({'find_tenants': '@'})
self.create_domain()
self.create_zone()
response = self.client.get('/reports/tenants')
@ -131,8 +131,8 @@ class AdminApiReportsTest(AdminApiTestCase):
def test_get_tenant(self):
self.policy({'find_tenants': '@'})
domain = self.create_domain()
tenant = domain.tenant_id
zone = self.create_zone()
tenant = zone.tenant_id
response = self.client.get('/reports/tenants/%s' % tenant)
self.assertEqual(200, response.status_int)

View File

@ -230,7 +230,7 @@ class FaultMiddlewareTest(ApiTestCase):
class RaisingRequest(FakeRequest):
def get_response(self, request):
raise exceptions.DuplicateDomain()
raise exceptions.DuplicateZone()
request = RaisingRequest()
ctxt = context.DesignateContext()

Some files were not shown because too many files have changed in this diff.