Move rec.priority into rec.data

Since V2, priority is stored in the data field of the record; this means that V1
code has errors when creating records, which can lead to bad data in places.

This change moves the record's priority into the data field instead of a
separate column.

Change-Id: I8733f7767dcbe0263c2c54861ed892adf0251175
This commit is contained in:
Endre Karlson 2014-10-28 11:01:41 +01:00
parent bd5aae223f
commit 67cf6b7b6f
18 changed files with 166 additions and 75 deletions

View File

@ -19,6 +19,7 @@ from designate.openstack.common import log as logging
from designate import exceptions
from designate import objects
from designate import schema
from designate import utils
from designate.api import get_central_api
@ -53,8 +54,12 @@ def _find_or_create_recordset(context, domain_id, name, type, ttl):
def _extract_record_values(values):
record_values = ('data', 'priority', 'description',)
return dict((k, values[k]) for k in record_values if k in values)
record_values = dict((k, values[k]) for k in ('data', 'description',)
if k in values)
if 'priority' in values:
record_values['data'] = '%d %s' % (
values['priority'], record_values['data'])
return record_values
def _extract_recordset_values(values):
@ -64,6 +69,10 @@ def _extract_recordset_values(values):
def _format_record_v1(record, recordset):
record = dict(record)
record['priority'], record['data'] = utils.extract_priority_from_data(
recordset.type, record)
record.update({
'name': recordset['name'],
'type': recordset['type'],

View File

@ -32,7 +32,7 @@ class RecordSetsController(rest.RestController):
_resource_schema = schema.Schema('v2', 'recordset')
_collection_schema = schema.Schema('v2', 'recordsets')
SORT_KEYS = ['created_at', 'id', 'updated_at', 'domain_id', 'tenant_id',
'name', 'type', 'ttl', 'records', 'priority']
'name', 'type', 'ttl', 'records']
@pecan.expose(template='json:', content_type='application/json')
@utils.validate_uuid('zone_id', 'recordset_id')

View File

@ -90,7 +90,6 @@ class ZonesController(rest.RestController):
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})
@ -271,14 +270,12 @@ class ZonesController(rest.RestController):
def _record2json(self, record_type, rdata):
if record_type == 'MX':
return {
'data': rdata.exchange.to_text(),
'priority': rdata.preference
'data': '%d %s' % (rdata.preference, rdata.exchange.to_text()),
}
elif record_type == 'SRV':
return {
'data': '%d %d %s' % (rdata.weight, rdata.port,
rdata.target.to_text()),
'priority': rdata.priority
'data': '%d %d %d %s' % (rdata.priority, rdata.weight,
rdata.port, rdata.target.to_text()),
}
else:
return {

View File

@ -177,7 +177,6 @@ class Bind9Backend(base.Backend):
'name': recordset['name'],
'type': recordset['type'],
'ttl': recordset['ttl'],
'priority': record['priority'],
'data': record['data'],
})

View File

@ -91,10 +91,10 @@ domain2ipa = {'ttl': 'dnsttl', 'email': 'idnssoarname',
# map of designate record types to ipa
rectype2iparectype = {'A': ('arecord', '%(data)s'),
'AAAA': ('aaaarecord', '%(data)s'),
'MX': ('mxrecord', '%(priority)d %(data)s'),
'MX': ('mxrecord', '%(data)s'),
'CNAME': ('cnamerecord', '%(data)s'),
'TXT': ('txtrecord', '%(data)s'),
'SRV': ('srvrecord', '%(priority)d %(data)s'),
'SRV': ('srvrecord', '%(data)s'),
'NS': ('nsrecord', '%(data)s'),
'PTR': ('ptrrecord', '%(data)s'),
'SPF': ('spfrecord', '%(data)s'),

View File

@ -26,6 +26,7 @@ from sqlalchemy.sql import select
from designate.openstack.common import log as logging
from designate.i18n import _LC
from designate import exceptions
from designate import utils
from designate.backend import base
from designate.backend.impl_powerdns import tables
from designate.sqlalchemy import session
@ -352,7 +353,11 @@ class PowerDNSBackend(base.Backend):
exceptions.DomainNotFound,
id_col=tables.domains.c.designate_id)
content = self._sanitize_content(recordset['type'], record['data'])
# Priority is stored in the data field for MX / SRV
priority, data = utils.extract_priority_from_data(
recordset['type'], record)
content = self._sanitize_content(recordset['type'], data)
ttl = domain['ttl'] if recordset['ttl'] is None else recordset['ttl']
record_values = {
@ -364,7 +369,7 @@ class PowerDNSBackend(base.Backend):
'content': content,
'ttl': ttl,
'inherit_ttl': True if recordset['ttl'] is None else False,
'prio': record['priority'],
'prio': priority,
'auth': self._is_authoritative(domain, recordset, record)
}

View File

@ -306,30 +306,6 @@ class Service(service.RPCService):
return domain
# Methods to handle priority
def _get_priority(self, recordset):
if recordset.type != "MX" and recordset.type != "SRV":
return recordset
else:
if recordset.records is not None:
for r in recordset.records:
r.data = str(r.priority) + " " + r.data
return recordset
def _set_priority(self, recordset):
if recordset.type != "MX" and recordset.type != "SRV":
return recordset
else:
if recordset.records is not None:
for r in recordset.records:
head, sep, tail = r.data.partition(" ")
if sep:
r.priority = head
r.data = tail
return recordset
# SOA Recordset Methods
def _build_soa_record(self, zone, servers):
return "%s %s. %d %d %d %d %d" % (servers[0]['name'],
@ -985,9 +961,6 @@ class Service(service.RPCService):
self._is_valid_recordset_placement_subdomain(
context, domain, recordset.name)
# Extract the priority from the records
recordset = self._set_priority(recordset)
created_recordset = self.storage.create_recordset(context, domain_id,
recordset)
@ -1003,8 +976,7 @@ class Service(service.RPCService):
if len(recordset.records) > 0:
self._increment_domain_serial(context, domain.id)
# Get the correct format for priority
return self._get_priority(recordset)
return recordset
def get_recordset(self, context, domain_id, recordset_id):
domain = self.storage.get_domain(context, domain_id)
@ -1023,8 +995,7 @@ class Service(service.RPCService):
policy.check('get_recordset', context, target)
# Add the priority to the records
recordset = self._get_priority(recordset)
recordset = recordset
return recordset
@ -1036,10 +1007,6 @@ class Service(service.RPCService):
recordsets = self.storage.find_recordsets(context, criterion, marker,
limit, sort_key, sort_dir)
# Set the priority for each record
for rs in recordsets:
rs = self._get_priority(rs)
return recordsets
def find_recordset(self, context, criterion=None):
@ -1048,9 +1015,6 @@ class Service(service.RPCService):
recordset = self.storage.find_recordset(context, criterion)
# Add the priority to the records
recordset = self._get_priority(recordset)
return recordset
@transaction
@ -1058,9 +1022,6 @@ class Service(service.RPCService):
domain_id = recordset.obj_get_original_value('domain_id')
domain = self.storage.get_domain(context, domain_id)
# Set the priority for the records
recordset = self._set_priority(recordset)
changes = recordset.obj_get_changes()
# Ensure immutable fields are not changed
@ -1109,7 +1070,7 @@ class Service(service.RPCService):
# Send RecordSet update notification
self.notifier.info(context, 'dns.recordset.update', recordset)
return self._get_priority(recordset)
return recordset
@transaction
def delete_recordset(self, context, domain_id, recordset_id,

View File

@ -85,14 +85,7 @@ class RequestHandler(object):
# construct rdata from all the records
rdata = []
for record in recordset.records:
# dnspython expects data to be a string. convert record
# data from unicode to string
# For MX and SRV records add a priority field.
if recordset.type == "MX" or recordset.type == "SRV":
rdata.append(str.format(
"%d %s" % (record.priority, str(record.data))))
else:
rdata.append(str(record.data))
rdata.append(str(record.data))
# Now put the records into dnspython's RRsets
# answer section has 1 RR set. If the RR set has multiple

View File

@ -21,7 +21,6 @@ class Record(base.DictObjectMixin, base.PersistentObjectMixin,
# so we should remove it.
FIELDS = {
'data': {},
'priority': {},
'domain_id': {},
'managed': {},
'managed_resource_type': {},

View File

@ -20,8 +20,8 @@ class RRData_MX(Record):
MX Resource Record Type
Defined in: RFC1035
"""
# priority is maintained separately for MX records and not in 'data'
FIELDS = {
'priority': {},
'exchange': {}
}

View File

@ -20,8 +20,8 @@ class RRData_SRV(Record):
SRV Resource Record Type
Defined in: RFC2782
"""
# priority is maintained separately for SRV records and not in 'data'
FIELDS = {
'priority': {},
'weight': {},
'port': {},
'target': {}

View File

@ -2,7 +2,7 @@ $ORIGIN {{ domain.name }}
$TTL {{ domain.ttl }}
{% for record in records -%}
{{record.name}} {{record.ttl or ''}} IN {{record.type}} {{record.priority or ''}} {{record.data}}
{{record.name}} {{record.ttl or ''}} IN {{record.type}} {{record.data}}
{% else %}
{# Since the zone is created before the NS/SOA records are, we need to "fool" bind

View File

@ -463,8 +463,7 @@ class SQLAlchemyStorage(sqlalchemy_base.SQLAlchemy, storage_base.Storage):
Calculates the hash of the record, used to ensure record uniqueness.
"""
md5 = hashlib.md5()
md5.update("%s:%s:%s" % (record.recordset_id, record.data,
record.priority))
md5.update("%s:%s" % (record.recordset_id, record.data))
return md5.hexdigest()

View File

@ -0,0 +1,117 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from sqlalchemy.sql import select
from sqlalchemy import Integer
from sqlalchemy.schema import Column, MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
meta = MetaData()
def _build_hash(*args):
md5 = hashlib.md5()
md5.update(":".join(args))
return md5.hexdigest()
def _get_recordsets(table):
    """Return (id, type) rows for every MX or SRV recordset."""
    query = select(columns=[table.c.id, table.c.type])
    query = query.where((table.c.type == 'MX') | (table.c.type == 'SRV'))
    return query.execute().fetchall()
def upgrade(migrate_engine):
    """Fold the records.priority column into the data field.

    For every MX/SRV record with a non-NULL priority, rewrite data as
    "<priority> <data>", recompute the hash, then drop the priority
    column entirely.
    """
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Only MX / SRV recordsets carry a priority.
    recordsets = _get_recordsets(rs_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        # Records without a priority are already in the new format.
        query = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .where(records_table.c.priority != None)  # noqa
        records = query.execute().fetchall()

        for record in records:
            # Prepend the priority to the data: "<priority> <data>".
            new_data = '%s %s' % (int(record[1]), record[2])
            # New style hashes are <rs_id>:<data> since prio is baked into data
            new_hash = _build_hash(rs[0], new_data)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(data=new_data, hash=new_hash)
            migrate_engine.execute(update)

    # The separate priority column is no longer needed.
    records_table.c.priority.drop()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        # NOTE(review): presumably the column drop rebuilds the table on
        # SQLite and loses the unique index - confirm.
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()
def downgrade(migrate_engine):
    """Restore the separate records.priority column.

    Re-creates the priority column, then for every MX/SRV record splits
    the leading "<priority> " prefix back out of data and recomputes the
    old-style hash.
    """
    meta.bind = migrate_engine

    rs_table = Table('recordsets', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    # Only MX / SRV recordsets had their priority folded into data.
    recordsets = _get_recordsets(rs_table)

    # Re-create the dropped priority column.
    col = Column('priority', Integer, default=None, nullable=True)
    col.create(records_table)

    record_cols = [
        records_table.c.id,
        records_table.c.priority,
        records_table.c.data]

    for rs in recordsets:
        records = select(columns=record_cols)\
            .where(records_table.c.recordset_id == rs[0])\
            .execute().fetchall()

        for record in records:
            # data is "<priority> <rest>"; split off the first field only.
            priority, _, data = record[2].partition(" ")

            # Old style hashes are <rs_id>:<data>:<priority>
            new_hash = _build_hash(rs[0], data, priority)

            update = records_table.update()\
                .where(records_table.c.id == record[0])\
                .values(priority=int(priority), data=data, hash=new_hash)
            update.execute()

    dialect = migrate_engine.url.get_dialect().name
    if dialect.startswith('sqlite'):
        # Add missing unique index
        # NOTE(review): presumably needed because SQLite table rebuilds
        # drop the unique index - confirm.
        constraint = UniqueConstraint('hash',
                                      name='unique_recordset',
                                      table=records_table)
        constraint.create()

View File

@ -140,7 +140,6 @@ records = Table('records', metadata,
Column('domain_id', UUID, nullable=False),
Column('recordset_id', UUID, nullable=False),
Column('data', Text, nullable=False),
Column('priority', Integer, default=None, nullable=True),
Column('description', Unicode(160), nullable=True),
Column('hash', String(32), nullable=False, unique=True),
Column('managed', Boolean, default=False),

View File

@ -222,12 +222,12 @@ class TestCase(base.BaseTestCase):
{'data': '192.0.2.2'}
],
'MX': [
{'data': 'mail.example.org.', 'priority': 5},
{'data': 'mail.example.com.', 'priority': 10},
{'data': '5 mail.example.org.'},
{'data': '10 mail.example.com.'},
],
'SRV': [
{'data': '0 5060 server1.example.org.', 'priority': 5},
{'data': '1 5060 server2.example.org.', 'priority': 10},
{'data': '5 0 5060 server1.example.org.'},
{'data': '10 1 5060 server2.example.org.'},
],
'CNAME': [
{'data': 'www.somedomain.org.'},

View File

@ -204,7 +204,11 @@ class ApiV1RecordsTest(ApiV1Test):
self.domain['name'], 'SRV')
fixture = self.get_record_fixture(recordset_fixture['type'])
priority, _, data = fixture['data'].partition(" ")
fixture.update({
'data': data,
'priority': int(priority),
'name': recordset_fixture['name'],
'type': recordset_fixture['type'],
})
@ -216,6 +220,7 @@ class ApiV1RecordsTest(ApiV1Test):
self.assertIn('id', response.json)
self.assertEqual(response.json['type'], fixture['type'])
self.assertEqual(response.json['name'], fixture['name'])
self.assertEqual(response.json['priority'], fixture['priority'])
self.assertEqual(response.json['data'], fixture['data'])

View File

@ -330,3 +330,11 @@ def get_proxies():
proxies['https'] = proxies['http']
return proxies
def extract_priority_from_data(recordset_type, record):
    """Split a V1-style priority out of a record's data field.

    For MX and SRV recordsets the priority is stored as the first
    whitespace-separated field of data; return ``(priority, rest)`` with
    priority as an int. For every other type return ``(None, data)``
    untouched.
    """
    data = record['data']
    if recordset_type not in ('MX', 'SRV'):
        return None, data
    head, _, rest = data.partition(" ")
    return int(head), rest