diff --git a/doc/source/conf.py b/doc/source/conf.py
index 95126b67e..e45f59133 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -55,8 +55,8 @@
# The encoding of source files.
#source_encoding = 'utf-8'
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+master_doc = 'index'
# General information about the project.
project = u'nova'
diff --git a/nova/api/openstack/compute/contrib/extended_ips.py b/nova/api/openstack/compute/contrib/extended_ips.py
index 20356c08d..19c80ee6f 100644
--- a/nova/api/openstack/compute/contrib/extended_ips.py
+++ b/nova/api/openstack/compute/contrib/extended_ips.py
@@ -47,7 +47,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedIpsServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -59,7 +59,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedIpsServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -96,7 +96,7 @@ def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_ips.alias: Extended_ips.namespace})
@@ -105,5 +105,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_ips.alias: Extended_ips.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_ips_mac.py b/nova/api/openstack/compute/contrib/extended_ips_mac.py
index e5757326f..1a273a44b 100644
--- a/nova/api/openstack/compute/contrib/extended_ips_mac.py
+++ b/nova/api/openstack/compute/contrib/extended_ips_mac.py
@@ -47,7 +47,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedIpsMacServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -59,7 +59,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedIpsMacServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -95,7 +95,7 @@ class ExtendedIpsMacServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_ips_mac.alias: Extended_ips_mac.namespace})
@@ -104,5 +104,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_ips_mac.alias: Extended_ips_mac.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index a8b441f80..87c823261 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedServerAttributeTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -51,7 +51,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedServerAttributesTemplate())
servers = list(resp_obj.obj['servers'])
@@ -92,7 +92,7 @@ def construct(self):
make_server(root)
alias = Extended_server_attributes.alias
namespace = Extended_server_attributes.namespace
- return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace})
class ExtendedServerAttributesTemplate(xmlutil.TemplateBuilder):
@@ -102,4 +102,4 @@ def construct(self):
make_server(elem)
alias = Extended_server_attributes.alias
namespace = Extended_server_attributes.namespace
- return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index 75086309a..65a3b61f4 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -36,7 +36,7 @@ def _extend_server(self, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -48,7 +48,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -86,7 +86,7 @@ class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
@@ -95,5 +95,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
index 3c6774656..ac93a5ecc 100644
--- a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
+++ b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
@@ -33,7 +33,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'virtual_interface',
selector='virtual_interfaces')
make_vif(elem)
- return xmlutil.SlaveTemplate(root, 1,
+ return xmlutil.SubordinateTemplate(root, 1,
nsmap={Extended_virtual_interfaces_net.alias:
Extended_virtual_interfaces_net.namespace})
@@ -48,7 +48,7 @@ def index(self, req, resp_obj, server_id):
key = "%s:net_id" % Extended_virtual_interfaces_net.alias
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedVirtualInterfaceNetTemplate())
for vif in resp_obj.obj['virtual_interfaces']:
vif1 = self.network_api.get_vif_by_mac_address(context,
diff --git a/nova/api/openstack/compute/contrib/extended_volumes.py b/nova/api/openstack/compute/contrib/extended_volumes.py
index 0996f80aa..6177fac22 100644
--- a/nova/api/openstack/compute/contrib/extended_volumes.py
+++ b/nova/api/openstack/compute/contrib/extended_volumes.py
@@ -37,7 +37,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedVolumesServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -49,7 +49,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ExtendedVolumesServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -88,7 +88,7 @@ class ExtendedVolumesServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_volumes.alias: Extended_volumes.namespace})
@@ -97,5 +97,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Extended_volumes.alias: Extended_volumes.namespace})
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index c97e7ca09..e5d709404 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -46,7 +46,7 @@ def construct(self):
make_flavor(root)
alias = Flavor_access.alias
namespace = Flavor_access.namespace
- return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace})
class FlavorsTemplate(xmlutil.TemplateBuilder):
@@ -56,7 +56,7 @@ def construct(self):
make_flavor(elem)
alias = Flavor_access.alias
namespace = Flavor_access.namespace
- return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={alias: namespace})
class FlavorAccessTemplate(xmlutil.TemplateBuilder):
@@ -65,7 +65,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'access',
selector='flavor_access')
make_flavor_access(elem)
- return xmlutil.MasterTemplate(root, 1)
+ return xmlutil.MainTemplate(root, 1)
def _marshall_flavor_access(flavor):
@@ -127,7 +127,7 @@ def _extend_flavor(self, flavor_rval, flavor_ref):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(id)
@@ -137,7 +137,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=FlavorsTemplate())
flavors = list(resp_obj.obj['flavors'])
@@ -149,7 +149,7 @@ def detail(self, req, resp_obj):
def create(self, req, body, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id'])
diff --git a/nova/api/openstack/compute/contrib/image_size.py b/nova/api/openstack/compute/contrib/image_size.py
index 21998738f..f4cd98cdf 100644
--- a/nova/api/openstack/compute/contrib/image_size.py
+++ b/nova/api/openstack/compute/contrib/image_size.py
@@ -29,7 +29,7 @@ def construct(self):
root = xmlutil.TemplateElement('images')
elem = xmlutil.SubTemplateElement(root, 'image', selector='images')
make_image(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Image_size.alias: Image_size.namespace})
@@ -37,7 +37,7 @@ class ImageSizeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('image', selector='image')
make_image(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Image_size.alias: Image_size.namespace})
@@ -51,7 +51,7 @@ def _extend_image(self, image, image_cache):
def show(self, req, resp_obj, id):
context = req.environ["nova.context"]
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ImageSizeTemplate())
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
@@ -63,7 +63,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ImagesSizeTemplate())
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
diff --git a/nova/api/openstack/compute/contrib/server_usage.py b/nova/api/openstack/compute/contrib/server_usage.py
index e57b611cc..bb2e6bfdf 100644
--- a/nova/api/openstack/compute/contrib/server_usage.py
+++ b/nova/api/openstack/compute/contrib/server_usage.py
@@ -41,7 +41,7 @@ def _extend_server(self, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ServerUsageTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -53,7 +53,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ServerUsagesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -89,7 +89,7 @@ class ServerUsageTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Server_usage.alias: Server_usage.namespace})
@@ -98,5 +98,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Server_usage.alias: Server_usage.namespace})
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 6d4974fc2..364f026d6 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -115,7 +115,7 @@ class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
@@ -124,7 +124,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
@@ -132,27 +132,27 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
- return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.SubordinateTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
- master = ServerTemplate()
- master.attach(ServerAdminPassTemplate())
- return master
+ main = ServerTemplate()
+ main.attach(ServerAdminPassTemplate())
+ return main
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 13d72c252..e4639b532 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -606,7 +606,7 @@ def preserialize(self, content_type, default_serializers=None):
self.serializer = serializer()
def attach(self, **kwargs):
- """Attach slave templates to serializers."""
+ """Attach subordinate templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py
index 7bc5714a3..622c03367 100644
--- a/nova/api/openstack/xmlutil.py
+++ b/nova/api/openstack/xmlutil.py
@@ -671,13 +671,13 @@ def wrap(self):
# We are a template
return self
- def apply(self, master):
- """Hook method for determining slave applicability.
+ def apply(self, main):
+ """Hook method for determining subordinate applicability.
An overridable hook method used to determine if this template
- is applicable as a slave to a given master template.
+ is applicable as a subordinate to a given main template.
- :param master: The master template to test.
+ :param main: The main template to test.
"""
return True
@@ -692,17 +692,17 @@ def tree(self):
return "%r: %s" % (self, self.root.tree())
-class MasterTemplate(Template):
- """Represent a master template.
+class MainTemplate(Template):
+ """Represent a main template.
- Master templates are versioned derivatives of templates that
- additionally allow slave templates to be attached. Slave
+ Main templates are versioned derivatives of templates that
+ additionally allow subordinate templates to be attached. Subordinate
templates allow modification of the serialized result without
- directly changing the master.
+ directly changing the main.
"""
def __init__(self, root, version, nsmap=None):
- """Initialize a master template.
+ """Initialize a main template.
:param root: The root element of the template.
:param version: The version number of the template.
@@ -711,9 +711,9 @@ def __init__(self, root, version, nsmap=None):
template.
"""
- super(MasterTemplate, self).__init__(root, nsmap)
+ super(MainTemplate, self).__init__(root, nsmap)
self.version = version
- self.slaves = []
+ self.subordinates = []
def __repr__(self):
"""Return string representation of the template."""
@@ -727,88 +727,88 @@ def _siblings(self):
An overridable hook method to return the siblings of the root
element. This is the root element plus the root elements of
- all the slave templates.
+ all the subordinate templates.
"""
- return [self.root] + [slave.root for slave in self.slaves]
+ return [self.root] + [subordinate.root for subordinate in self.subordinates]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
- The namespace dictionary is computed by taking the master
+ The namespace dictionary is computed by taking the main
template's namespace dictionary and updating it from all the
- slave templates.
+ subordinate templates.
"""
nsmap = self.nsmap.copy()
- for slave in self.slaves:
- nsmap.update(slave._nsmap())
+ for subordinate in self.subordinates:
+ nsmap.update(subordinate._nsmap())
return nsmap
- def attach(self, *slaves):
- """Attach one or more slave templates.
+ def attach(self, *subordinates):
+ """Attach one or more subordinate templates.
- Attaches one or more slave templates to the master template.
- Slave templates must have a root element with the same tag as
- the master template. The slave template's apply() method will
- be called to determine if the slave should be applied to this
- master; if it returns False, that slave will be skipped.
- (This allows filtering of slaves based on the version of the
- master template.)
+ Attaches one or more subordinate templates to the main template.
+ Subordinate templates must have a root element with the same tag as
+ the main template. The subordinate template's apply() method will
+ be called to determine if the subordinate should be applied to this
+ main; if it returns False, that subordinate will be skipped.
+ (This allows filtering of subordinates based on the version of the
+ main template.)
"""
- slave_list = []
- for slave in slaves:
- slave = slave.wrap()
+ subordinate_list = []
+ for subordinate in subordinates:
+ subordinate = subordinate.wrap()
# Make sure we have a tree match
- if slave.root.tag != self.root.tag:
- msg = _("Template tree mismatch; adding slave %(slavetag)s to "
- "master %(mastertag)s") % {'slavetag': slave.root.tag,
- 'mastertag': self.root.tag}
+ if subordinate.root.tag != self.root.tag:
+ msg = _("Template tree mismatch; adding subordinate %(subordinatetag)s to "
+ "main %(maintag)s") % {'subordinatetag': subordinate.root.tag,
+ 'maintag': self.root.tag}
raise ValueError(msg)
- # Make sure slave applies to this template
- if not slave.apply(self):
+ # Make sure subordinate applies to this template
+ if not subordinate.apply(self):
continue
- slave_list.append(slave)
+ subordinate_list.append(subordinate)
- # Add the slaves
- self.slaves.extend(slave_list)
+ # Add the subordinates
+ self.subordinates.extend(subordinate_list)
def copy(self):
- """Return a copy of this master template."""
+ """Return a copy of this main template."""
- # Return a copy of the MasterTemplate
+ # Return a copy of the MainTemplate
tmp = self.__class__(self.root, self.version, self.nsmap)
- tmp.slaves = self.slaves[:]
+ tmp.subordinates = self.subordinates[:]
return tmp
-class SlaveTemplate(Template):
- """Represent a slave template.
+class SubordinateTemplate(Template):
+ """Represent a subordinate template.
- Slave templates are versioned derivatives of templates. Each
- slave has a minimum version and optional maximum version of the
- master template to which they can be attached.
+ Subordinate templates are versioned derivatives of templates. Each
+ subordinate has a minimum version and optional maximum version of the
+ main template to which they can be attached.
"""
def __init__(self, root, min_vers, max_vers=None, nsmap=None):
- """Initialize a slave template.
+ """Initialize a subordinate template.
:param root: The root element of the template.
- :param min_vers: The minimum permissible version of the master
- template for this slave template to apply.
- :param max_vers: An optional upper bound for the master
+ :param min_vers: The minimum permissible version of the main
+ template for this subordinate template to apply.
+ :param max_vers: An optional upper bound for the main
template version.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
- super(SlaveTemplate, self).__init__(root, nsmap)
+ super(SubordinateTemplate, self).__init__(root, nsmap)
self.min_vers = min_vers
self.max_vers = max_vers
@@ -819,23 +819,23 @@ def __repr__(self):
(self.__class__.__module__, self.__class__.__name__,
self.min_vers, self.max_vers, id(self)))
- def apply(self, master):
- """Hook method for determining slave applicability.
+ def apply(self, main):
+ """Hook method for determining subordinate applicability.
An overridable hook method used to determine if this template
- is applicable as a slave to a given master template. This
- version requires the master template to have a version number
+ is applicable as a subordinate to a given main template. This
+ version requires the main template to have a version number
between min_vers and max_vers.
- :param master: The master template to test.
+ :param main: The main template to test.
"""
- # Does the master meet our minimum version requirement?
- if master.version < self.min_vers:
+ # Does the main meet our minimum version requirement?
+ if main.version < self.min_vers:
return False
# How about our maximum version requirement?
- if self.max_vers is not None and master.version > self.max_vers:
+ if self.max_vers is not None and main.version > self.max_vers:
return False
return True
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 97b92e941..f653108b6 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -890,7 +890,7 @@ def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
- def _get_instance_nw_info(self, context, instance, use_slave=False):
+ def _get_instance_nw_info(self, context, instance, use_subordinate=False):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance['system_metadata']) == 0):
@@ -901,7 +901,7 @@ def _get_instance_nw_info(self, context, instance, use_slave=False):
# succeed.
instance = instance_obj.Instance.get_by_uuid(context,
instance['uuid'],
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
network_info = self.network_api.get_instance_nw_info(context,
instance)
@@ -1208,7 +1208,7 @@ def _check_instance_build_time(self, context):
'host': self.host}
building_insts = instance_obj.InstanceList.get_by_filters(context,
- filters, expected_attrs=[], use_slave=True)
+ filters, expected_attrs=[], use_subordinate=True)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
@@ -4556,14 +4556,14 @@ def _heal_instance_info_cache(self, context):
instance = instance_obj.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata'],
- use_slave=True)
+ use_subordinate=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
else:
# No more in our copy of uuids. Pull from the DB.
db_instances = instance_obj.InstanceList.get_by_host(
- context, self.host, expected_attrs=[], use_slave=True)
+ context, self.host, expected_attrs=[], use_subordinate=True)
if not db_instances:
# None.. just return.
return
@@ -4575,7 +4575,7 @@ def _heal_instance_info_cache(self, context):
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
- self._get_instance_nw_info(context, instance, use_slave=True)
+ self._get_instance_nw_info(context, instance, use_subordinate=True)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
@@ -4587,7 +4587,7 @@ def _poll_rebooting_instances(self, context):
filters = {'task_state': task_states.REBOOTING,
'host': self.host}
rebooting = instance_obj.InstanceList.get_by_filters(
- context, filters, expected_attrs=[], use_slave=True)
+ context, filters, expected_attrs=[], use_subordinate=True)
to_poll = []
for instance in rebooting:
@@ -4603,7 +4603,7 @@ def _poll_rescued_instances(self, context):
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = self.conductor_api.instance_get_all_by_filters(
- context, filters, columns_to_join=[], use_slave=True)
+ context, filters, columns_to_join=[], use_subordinate=True)
to_unrescue = []
for instance in rescued_instances:
@@ -4622,7 +4622,7 @@ def _poll_unconfirmed_resizes(self, context):
mig_list_cls = migration_obj.MigrationList
migrations = mig_list_cls.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
- use_slave=True)
+ use_subordinate=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
@@ -4650,7 +4650,7 @@ def _set_migration_to_error(migration, reason, **kwargs):
try:
instance = instance_obj.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
- use_slave=True)
+ use_subordinate=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
@@ -4687,7 +4687,7 @@ def _poll_shelved_instances(self, context):
'host': self.host}
shelved_instances = instance_obj.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
- use_slave=True)
+ use_subordinate=True)
to_gc = []
for instance in shelved_instances:
@@ -4900,7 +4900,7 @@ def _sync_power_states(self, context):
"""
db_instances = instance_obj.InstanceList.get_by_host(context,
self.host,
- use_slave=True)
+ use_subordinate=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
@@ -4929,7 +4929,7 @@ def _sync_power_states(self, context):
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
- use_slave=True)
+ use_subordinate=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore and move on to next instance.
@@ -4940,7 +4940,7 @@ def _sync_power_states(self, context):
instance=db_instance)
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
- use_slave=False):
+ use_subordinate=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
@@ -4949,7 +4949,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
- db_instance.refresh(use_slave=use_slave)
+ db_instance.refresh(use_subordinate=use_subordinate)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
@@ -5076,7 +5076,7 @@ def _reclaim_queued_deletes(self, context):
instances = instance_obj.InstanceList.get_by_filters(
context, filters,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
- use_slave=True)
+ use_subordinate=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
capi = self.conductor_api
@@ -5258,11 +5258,11 @@ def _error_out_instance_on_exception(self, context, instance_uuid,
@aggregate_object_compat
@wrap_exception()
- def add_aggregate_host(self, context, aggregate, host, slave_info):
+ def add_aggregate_host(self, context, aggregate, host, subordinate_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
- slave_info=slave_info)
+ subordinate_info=subordinate_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'add_aggregate_host'))
@@ -5275,11 +5275,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info):
@aggregate_object_compat
@wrap_exception()
- def remove_aggregate_host(self, context, host, slave_info, aggregate):
+ def remove_aggregate_host(self, context, host, subordinate_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
- slave_info=slave_info)
+ subordinate_info=subordinate_info)
except NotImplementedError:
LOG.debug(_('Hypervisor driver does not support '
'remove_aggregate_host'))
@@ -5315,7 +5315,7 @@ def _run_image_cache_manager_pass(self, context):
'soft_deleted': True,
'host': nodes}
filtered_instances = instance_obj.InstanceList.get_by_filters(context,
- filters, expected_attrs=[], use_slave=True)
+ filters, expected_attrs=[], use_subordinate=True)
self.driver.manage_image_cache(context, filtered_instances)
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 2cdd409a9..3ecf4d526 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -140,7 +140,7 @@ class ComputeAPI(object):
2.0 - Remove 1.x backwards compat
2.1 - Adds orig_sys_metadata to rebuild_instance()
- 2.2 - Adds slave_info parameter to add_aggregate_host() and
+ 2.2 - Adds subordinate_info parameter to add_aggregate_host() and
remove_aggregate_host()
2.3 - Adds volume_id to reserve_block_device_name()
2.4 - Add bdms to terminate_instance
@@ -255,7 +255,7 @@ def _get_compat_version(self, current, havana_compat):
return current
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
- slave_info=None):
+ subordinate_info=None):
'''Add aggregate host.
:param ctxt: request context
@@ -276,7 +276,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
- slave_info=slave_info)
+ subordinate_info=subordinate_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
# NOTE(russellb) Havana compat
@@ -608,7 +608,7 @@ def refresh_provider_fw_rules(self, ctxt, host):
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
- slave_info=None):
+ subordinate_info=None):
'''Remove aggregate host.
:param ctxt: request context
@@ -629,7 +629,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
- slave_info=slave_info)
+ subordinate_info=subordinate_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
# NOTE(russellb) Havana compat
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 81418e494..10bada303 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -40,7 +40,7 @@
help='Generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
- help='XVP master process pid file'),
+ help='XVP main process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),
diff --git a/nova/db/api.py b/nova/db/api.py
index 08e3d4a75..0dd39b1a7 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -445,13 +445,13 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
- dest_compute, use_slave=False):
+ dest_compute, use_subordinate=False):
"""
Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
- confirm_window, dest_compute, use_slave=use_slave)
+ confirm_window, dest_compute, use_subordinate=use_subordinate)
def migration_get_in_progress_by_host_and_node(context, host, node):
@@ -588,10 +588,10 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
-def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
+def virtual_interface_get_by_instance(context, instance_id, use_subordinate=False):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
def virtual_interface_get_by_instance_and_network(context, instance_id,
@@ -632,10 +632,10 @@ def instance_destroy(context, instance_uuid, constraint=None,
return rv
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid,
- columns_to_join, use_slave=use_slave)
+ columns_to_join, use_subordinate=use_subordinate)
def instance_get(context, instance_id, columns_to_join=None):
@@ -651,13 +651,13 @@ def instance_get_all(context, columns_to_join=None):
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
def instance_get_active_by_window_joined(context, begin, end=None,
@@ -672,7 +672,7 @@ def instance_get_active_by_window_joined(context, begin, end=None,
def instance_get_all_by_host(context, host,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host, columns_to_join)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d7f397868..844e0b4ad 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -76,7 +76,7 @@
CONF.import_opt('connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
-CONF.import_opt('slave_connection',
+CONF.import_opt('subordinate_connection',
'nova.openstack.common.db.sqlalchemy.session',
group='database')
@@ -179,7 +179,7 @@ def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
- :param use_slave: If true, use slave_connection
+ :param use_subordinate: If true, use subordinate_connection
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
@@ -191,11 +191,11 @@ def model_query(context, model, *args, **kwargs):
model parameter.
"""
- use_slave = kwargs.get('use_slave') or False
- if CONF.database.slave_connection == '':
- use_slave = False
+ use_subordinate = kwargs.get('use_subordinate') or False
+ if CONF.database.subordinate_connection == '':
+ use_subordinate = False
- session = kwargs.get('session') or get_session(slave_session=use_slave)
+ session = kwargs.get('session') or get_session(subordinate_session=use_subordinate)
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
@@ -1448,9 +1448,9 @@ def virtual_interface_create(context, values):
return vif_ref
-def _virtual_interface_query(context, session=None, use_slave=False):
+def _virtual_interface_query(context, session=None, use_subordinate=False):
return model_query(context, models.VirtualInterface, session=session,
- read_deleted="no", use_slave=use_slave)
+ read_deleted="no", use_subordinate=use_subordinate)
@require_context
@@ -1496,12 +1496,12 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
@require_context
@require_instance_exists_using_uuid
-def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
+def virtual_interface_get_by_instance(context, instance_uuid, use_subordinate=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
- vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
+ vif_refs = _virtual_interface_query(context, use_subordinate=use_subordinate).\
filter_by(instance_uuid=instance_uuid).\
all()
return vif_refs
@@ -1696,16 +1696,16 @@ def instance_destroy(context, instance_uuid, constraint=None):
@require_context
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False):
return _instance_get_by_uuid(context, uuid,
- columns_to_join=columns_to_join, use_slave=use_slave)
+ columns_to_join=columns_to_join, use_subordinate=use_subordinate)
def _instance_get_by_uuid(context, uuid, session=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
- use_slave=use_slave).\
+ use_subordinate=use_subordinate).\
filter_by(uuid=uuid).\
first()
@@ -1734,9 +1734,9 @@ def instance_get(context, instance_id, columns_to_join=None):
def _build_instance_get(context, session=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
query = model_query(context, models.Instance, session=session,
- project_only=True, use_slave=use_slave).\
+ project_only=True, use_subordinate=use_subordinate).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
@@ -1754,7 +1754,7 @@ def _build_instance_get(context, session=None,
def _instances_fill_metadata(context, instances,
- manual_joins=None, use_slave=False):
+ manual_joins=None, use_subordinate=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
@@ -1771,13 +1771,13 @@ def _instances_fill_metadata(context, instances,
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
- for row in _instance_metadata_get_multi(context, uuids, use_slave):
+ for row in _instance_metadata_get_multi(context, uuids, use_subordinate):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
- use_slave):
+ use_subordinate):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
@@ -1829,7 +1829,7 @@ def instance_get_all(context, columns_to_join=None):
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
@@ -1866,10 +1866,10 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
sort_fn = {'desc': desc, 'asc': asc}
- if CONF.database.slave_connection == '':
- use_slave = False
+ if CONF.database.subordinate_connection == '':
+ use_subordinate = False
- session = get_session(slave_session=use_slave)
+ session = get_session(subordinate_session=use_subordinate)
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
@@ -2072,14 +2072,14 @@ def instance_get_active_by_window_joined(context, begin, end=None,
def _instance_get_all_query(context, project_only=False,
- joins=None, use_slave=False):
+ joins=None, use_subordinate=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
for join in joins:
query = query.options(joinedload(join))
return query
@@ -2088,12 +2088,12 @@ def _instance_get_all_query(context, project_only=False,
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
- use_slave=use_slave).filter_by(host=host).all(),
+ use_subordinate=use_subordinate).filter_by(host=host).all(),
manual_joins=columns_to_join,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
def _instance_get_all_uuids_by_host(context, host, session=None):
@@ -4051,12 +4051,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
- dest_compute, use_slave=False):
+ dest_compute, use_subordinate=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
- use_slave=use_slave).\
+ use_subordinate=use_subordinate).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index 5d55200c9..39b4e67a8 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -114,7 +114,7 @@ def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
"""
SELECT sql
FROM
sqlite_master
WHERE
type = 'table' AND
name = :table_name;
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index 4d5bb0c45..665c47ae8 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -42,9 +42,9 @@
default='password',
help='Password for LDAP DNS',
secret=True),
cfg.StrOpt('ldap_dns_soa_hostmaster',
default='hostmaster@example.org',
help='Hostmaster for LDAP DNS driver Statement of Authority'),
cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help='DNS Servers for LDAP DNS driver'),
@@ -156,7 +156,7 @@ def _soa(cls):
date = time.strftime('%Y%m%d%H%M%S')
soa = '%s %s %s %s %s %s %s' % (
CONF.ldap_dns_servers[0],
CONF.ldap_dns_soa_hostmaster,
date,
CONF.ldap_dns_soa_refresh,
CONF.ldap_dns_soa_retry,
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 81e49fa7c..ee9298687 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1566,7 +1566,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
run_as_root=True)
if (err and err != "device %s is already a member of a bridge;"
"can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 3e2330190..0f4406662 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -570,14 +570,14 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor,
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
- use_slave = kwargs.get('use_slave') or False
+ use_subordinate = kwargs.get('use_subordinate') or False
if not uuidutils.is_uuid_like(instance_id):
instance_id = instance_uuid
instance_uuid = instance_id
vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(context,
- instance_uuid, use_slave=use_slave)
+ instance_uuid, use_subordinate=use_subordinate)
networks = {}
for vif in vifs:
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index c1fbcfb7c..90759e70b 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -435,13 +435,13 @@ def show_port(self, context, port_id):
@refresh_cache
def get_instance_nw_info(self, context, instance, networks=None,
- use_slave=False):
+ use_subordinate=False):
"""Return network information for specified instance
and update cache.
"""
- # NOTE(geekinutah): It would be nice if use_slave had us call
- # special APIs that pummeled slaves instead of
- # the master. For now we just ignore this arg.
+ # NOTE(geekinutah): It would be nice if use_subordinate had us call
+ # special APIs that pummeled subordinates instead of
+ # the main. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks)
return result
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index e30209a60..e7fe30a39 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -72,7 +72,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject):
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
- # Version 1.10: Added use_slave to refresh and get_by_uuid
+ # Version 1.10: Added use_subordinate to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
# Version 1.12: Added ephemeral_key_uuid
VERSION = '1.12'
@@ -294,13 +294,13 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None):
return instance
@base.remotable_classmethod
- def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
+ def get_by_uuid(cls, context, uuid, expected_attrs=None, use_subordinate=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@@ -472,12 +472,12 @@ def _handle_cell_update_from_api():
self.obj_reset_changes()
@base.remotable
- def refresh(self, context, use_slave=False):
+ def refresh(self, context, use_subordinate=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
expected_attrs=extra,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
@@ -568,10 +568,10 @@ def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
- # Version 1.1: Added use_slave to get_by_host
+ # Version 1.1: Added use_subordinate to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
- # Version 1.3: Added use_slave to get_by_filters
+ # Version 1.3: Added use_subordinate to get_by_filters
# Version 1.4: Instance <= version 1.12
# Version 1.5: Added method get_active_by_window_joined.
VERSION = '1.5'
@@ -591,19 +591,19 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
- marker=None, expected_attrs=None, use_slave=False):
+ marker=None, expected_attrs=None, use_subordinate=False):
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
- def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
+ def get_by_host(cls, context, host, expected_attrs=None, use_subordinate=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index f017a8cd7..fcda76e91 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -88,7 +88,7 @@ def _make_list(context, list_obj, item_cls, db_list):
class MigrationList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Migration <= 1.1
- # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute
+ # Version 1.1: Added use_subordinate to get_unconfirmed_by_dest_compute
VERSION = '1.1'
fields = {
@@ -102,9 +102,9 @@ class MigrationList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_unconfirmed_by_dest_compute(cls, context, confirm_window,
- dest_compute, use_slave=False):
+ dest_compute, use_subordinate=False):
db_migrations = db.migration_get_unconfirmed_by_dest_compute(
- context, confirm_window, dest_compute, use_slave=use_slave)
+ context, confirm_window, dest_compute, use_subordinate=use_subordinate)
return _make_list(context, MigrationList(), Migration, db_migrations)
@base.remotable_classmethod
diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py
index cd6296114..03db68139 100644
--- a/nova/objects/virtual_interface.py
+++ b/nova/objects/virtual_interface.py
@@ -93,7 +93,7 @@ def get_all(cls, context):
return base.obj_make_list(context, cls(), VirtualInterface, db_vifs)
@base.remotable_classmethod
- def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
+ def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False):
db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
return base.obj_make_list(context, cls(), VirtualInterface, db_vifs)
diff --git a/nova/openstack/common/db/sqlalchemy/migration.py b/nova/openstack/common/db/sqlalchemy/migration.py
index b96daa1c0..e0ac93e9c 100644
--- a/nova/openstack/common/db/sqlalchemy/migration.py
+++ b/nova/openstack/common/db/sqlalchemy/migration.py
@@ -68,7 +68,7 @@ def _get_unique_constraints(self, table):
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
diff --git a/nova/openstack/common/db/sqlalchemy/session.py b/nova/openstack/common/db/sqlalchemy/session.py
index 3d6e763d3..64ef0cb40 100644
--- a/nova/openstack/common/db/sqlalchemy/session.py
+++ b/nova/openstack/common/db/sqlalchemy/session.py
@@ -330,11 +330,11 @@ def soft_delete_multi_models():
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
- cfg.StrOpt('slave_connection',
+ cfg.StrOpt('subordinate_connection',
default='',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
- 'slave database'),
+ 'subordinate database'),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
@@ -461,21 +461,21 @@ def connect(self, dbapi_con, con_record):
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
- slave_session=False, mysql_traditional_mode=False):
+ subordinate_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
- if slave_session:
+ if subordinate_session:
maker = _SLAVE_MAKER
if maker is None:
- engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
+ engine = get_engine(sqlite_fk=sqlite_fk, subordinate_engine=subordinate_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
- if slave_session:
+ if subordinate_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
@@ -617,7 +617,7 @@ def _wrap(*args, **kwargs):
return _wrap
-def get_engine(sqlite_fk=False, slave_engine=False,
+def get_engine(sqlite_fk=False, subordinate_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
@@ -625,14 +625,14 @@ def get_engine(sqlite_fk=False, slave_engine=False,
engine = _ENGINE
db_uri = CONF.database.connection
- if slave_engine:
+ if subordinate_engine:
engine = _SLAVE_ENGINE
- db_uri = CONF.database.slave_connection
+ db_uri = CONF.database.subordinate_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
- if slave_engine:
+ if subordinate_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
@@ -715,7 +715,7 @@ def create_engine(sql_connection, sqlite_fk=False,
mysql_traditional_mode=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
- # db handle or the slave db handle. Things like
+ # db handle or the subordinate db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
@@ -878,12 +878,12 @@ def _do_query(self, q):
def _assert_matching_drivers():
- """Make sure slave handle and normal handle have the same driver."""
+ """Make sure subordinate handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
- if CONF.database.slave_connection == '':
+ if CONF.database.subordinate_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
- slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
- assert normal.drivername == slave.drivername
+ subordinate = sqlalchemy.engine.url.make_url(CONF.database.subordinate_connection)
+ assert normal.drivername == subordinate.drivername
diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py
index c900b227e..3b0e015be 100644
--- a/nova/openstack/common/gettextutils.py
+++ b/nova/openstack/common/gettextutils.py
@@ -313,9 +313,9 @@ def get_available_languages(domain):
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
- # renamed to locale_identifiers() in >=1.0, the requirements master list
+ # renamed to locale_identifiers() in >=1.0, the requirements main list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
- # this check when the master list updates to >=1.0, and update all projects
+ # this check when the main list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 9f60a0303..d40be0d76 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -65,7 +65,7 @@ def fake_instance_get(context, id_):
self.stubs.Set(db, 'instance_get', fake_instance_get)
def fake_instance_get_by_uuid(context, uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
for instance in FAKE_INSTANCES:
if uuid == instance['uuid']:
return instance
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
index 0e931a230..3f98c61ca 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
@@ -78,7 +78,7 @@ def test_list_actions_restricted_by_project(self):
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -96,7 +96,7 @@ def test_get_action_restricted_by_project(self):
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -118,7 +118,7 @@ def setUp(self):
def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
- def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
+ def fake_instance_get_by_uuid(context, instance_id, use_subordinate=False):
return {'name': 'fake', 'project_id': context.project_id}
self.stubs.Set(compute_api.API, 'get', fake_get)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 5b35a899c..9dc9fcb1e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -79,7 +79,7 @@ def security_group_rule_db(rule, id=None):
def return_server(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': int(server_id),
'power_state': 0x01,
@@ -90,7 +90,7 @@ def return_server(context, server_id,
def return_server_by_uuid(context, server_uuid,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'power_state': 0x01,
@@ -354,7 +354,7 @@ def test_get_security_group_by_instance(self):
expected = {'security_groups': groups}
def return_instance(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
@@ -1612,7 +1612,7 @@ def construct(self):
root.set('id')
root.set('imageRef')
root.set('flavorRef')
- return xmlutil.MasterTemplate(root, 1,
+ return xmlutil.MainTemplate(root, 1,
nsmap={None: xmlutil.XMLNS_V11})
def _encode_body(self, body):
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index c4ec60ba3..f195c9f30 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -24,7 +24,7 @@
def fake_instance_get(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
result = fakes.stub_instance(id=1, uuid=instance_id)
result['created_at'] = None
result['deleted_at'] = None
diff --git a/nova/tests/api/openstack/compute/contrib/test_shelve.py b/nova/tests/api/openstack/compute/contrib/test_shelve.py
index 4cd393991..bfcc58ce2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_shelve.py
+++ b/nova/tests/api/openstack/compute/contrib/test_shelve.py
@@ -26,7 +26,7 @@
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' % context.project_id})
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
index 5486fe457..a4f1797b6 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
@@ -79,7 +79,7 @@ def test_list_actions_restricted_by_project(self):
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -96,7 +96,7 @@ def test_get_action_restricted_by_project(self):
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -119,7 +119,7 @@ def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py
index e8db94140..ddc1d14ed 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py
@@ -466,7 +466,7 @@ def test_rebuild_admin_password_pass_disabled(self):
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py
index fa5ff5b7d..73772b823 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_server_metadata.py
@@ -89,7 +89,7 @@ def return_server(context, server_id, columns_to_join=None):
def return_server_by_uuid(context, server_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
@@ -101,7 +101,7 @@ def return_server_by_uuid(context, server_uuid,
def return_server_nonexistent(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
raise exception.InstanceNotFound(instance_id=server_id)
@@ -592,7 +592,7 @@ def _return_server_in_build(self, context, server_id,
'vm_state': vm_states.BUILDING})
def _return_server_in_build_by_uuid(self, context, server_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
index 3e19bd367..34176dc5a 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
@@ -107,7 +107,7 @@ def fake_start_stop_not_ready(self, context, instance):
def fake_instance_get_by_uuid_not_found(context, uuid,
- columns_to_join, use_slave=False):
+ columns_to_join, use_subordinate=False):
raise exception.InstanceNotFound(instance_id=uuid)
@@ -689,7 +689,7 @@ def fake_get_all(compute_self, context, search_opts=None,
def test_tenant_id_filter_converts_to_project_id_for_admin(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'newfake')
@@ -709,7 +709,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_tenant_id_filter_no_admin_context(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotEqual(filters, None)
self.assertEqual(filters['project_id'], 'fake')
@@ -725,7 +725,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_tenant_id_filter_implies_all_tenants(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotEqual(filters, None)
# The project_id assertion checks that the project_id
@@ -747,7 +747,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_normal(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
@@ -764,7 +764,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_one(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
@@ -781,7 +781,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_zero(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
@@ -798,7 +798,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_false(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
@@ -831,7 +831,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_admin_restricted_tenant(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'fake')
@@ -849,7 +849,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_pass_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
+ columns_to_join=None, use_subordinate=False,
expected_attrs=[]):
self.assertIsNotNone(filters)
self.assertNotIn('project_id', filters)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py b/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py
index 58d62af34..4ac86e5ae 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_shelve.py
@@ -26,7 +26,7 @@
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' % context.project_id})
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index a209c0960..caf46d8f5 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -631,7 +631,7 @@ def test_rebuild_admin_pass_pass_disabled(self):
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index 08937f55a..71c25198c 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -90,7 +90,7 @@ def return_server(context, server_id, columns_to_join=None):
def return_server_by_uuid(context, server_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
@@ -102,7 +102,7 @@ def return_server_by_uuid(context, server_uuid,
def return_server_nonexistent(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
raise exception.InstanceNotFound(instance_id=server_id)
@@ -568,7 +568,7 @@ def _return_server_in_build(self, context, server_id,
'vm_state': vm_states.BUILDING})
def _return_server_in_build_by_uuid(self, context, server_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 22dc9d410..c2a41c98c 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -659,7 +659,7 @@ def fake_get_all(compute_self, context, search_opts=None,
def test_tenant_id_filter_converts_to_project_id_for_admin(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'newfake')
self.assertFalse(filters.get('tenant_id'))
@@ -678,7 +678,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_normal(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
@@ -694,7 +694,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_one(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
@@ -710,7 +710,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_zero(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
@@ -726,7 +726,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_param_false(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertNotIn('all_tenants', filters)
return [fakes.stub_instance(100)]
@@ -757,7 +757,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_admin_restricted_tenant(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertIsNotNone(filters)
self.assertEqual(filters['project_id'], 'fake')
return [fakes.stub_instance(100)]
@@ -774,7 +774,7 @@ def fake_get_all(context, filters=None, sort_key=None,
def test_all_tenants_pass_policy(self):
def fake_get_all(context, filters=None, sort_key=None,
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertIsNotNone(filters)
self.assertNotIn('project_id', filters)
return [fakes.stub_instance(100)]
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 266bc5a15..74db64a47 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -438,7 +438,7 @@ def get_fake_uuid(token=0):
def fake_instance_get(**kwargs):
- def _return_server(context, uuid, columns_to_join=None, use_slave=False):
+ def _return_server(context, uuid, columns_to_join=None, use_subordinate=False):
return stub_instance(1, **kwargs)
return _return_server
@@ -461,8 +461,8 @@ def _return_servers(context, *args, **kwargs):
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
- if 'use_slave' in kwargs:
- kwargs.pop('use_slave')
+ if 'use_subordinate' in kwargs:
+ kwargs.pop('use_subordinate')
for i in xrange(num_servers):
uuid = get_fake_uuid(i)
diff --git a/nova/tests/api/openstack/test_xmlutil.py b/nova/tests/api/openstack/test_xmlutil.py
index 75822a54e..774b5daae 100644
--- a/nova/tests/api/openstack/test_xmlutil.py
+++ b/nova/tests/api/openstack/test_xmlutil.py
@@ -391,17 +391,17 @@ def test__render(self):
attr2=xmlutil.ConstantSelector(2),
attr3=xmlutil.ConstantSelector(3))
- # Create a master template element
- master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
+ # Create a main template element
+ main_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
- # Create a couple of slave template element
- slave_elems = [
+ # Create a couple of subordinate template elements
+ subordinate_elems = [
xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']),
]
# Try the render
- elem = master_elem._render(None, None, slave_elems, None)
+ elem = main_elem._render(None, None, subordinate_elems, None)
# Verify the particulars of the render
self.assertEqual(elem.tag, 'test')
@@ -413,7 +413,7 @@ def test__render(self):
parent = etree.Element('parent')
# Try the render again...
- elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
+ elem = main_elem._render(parent, None, subordinate_elems, dict(a='foo'))
# Verify the particulars of the render
self.assertEqual(len(parent), 1)
@@ -550,47 +550,47 @@ def test__nsmap(self):
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
- def test_master_attach(self):
- # Set up a master template
+ def test_main_attach(self):
+ # Set up a main template
elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.MasterTemplate(elem, 1)
+ tmpl = xmlutil.MainTemplate(elem, 1)
- # Make sure it has a root but no slaves
+ # Make sure it has a root but no subordinates
self.assertEqual(tmpl.root, elem)
- self.assertEqual(len(tmpl.slaves), 0)
+ self.assertEqual(len(tmpl.subordinates), 0)
self.assertTrue(repr(tmpl))
- # Try to attach an invalid slave
+ # Try to attach an invalid subordinate
bad_elem = xmlutil.TemplateElement('test2')
self.assertRaises(ValueError, tmpl.attach, bad_elem)
- self.assertEqual(len(tmpl.slaves), 0)
+ self.assertEqual(len(tmpl.subordinates), 0)
- # Try to attach an invalid and a valid slave
+ # Try to attach an invalid and a valid subordinate
good_elem = xmlutil.TemplateElement('test')
self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
- self.assertEqual(len(tmpl.slaves), 0)
+ self.assertEqual(len(tmpl.subordinates), 0)
# Try to attach an inapplicable template
class InapplicableTemplate(xmlutil.Template):
- def apply(self, master):
+ def apply(self, main):
return False
inapp_tmpl = InapplicableTemplate(good_elem)
tmpl.attach(inapp_tmpl)
- self.assertEqual(len(tmpl.slaves), 0)
+ self.assertEqual(len(tmpl.subordinates), 0)
# Now try attaching an applicable template
tmpl.attach(good_elem)
- self.assertEqual(len(tmpl.slaves), 1)
- self.assertEqual(tmpl.slaves[0].root, good_elem)
+ self.assertEqual(len(tmpl.subordinates), 1)
+ self.assertEqual(tmpl.subordinates[0].root, good_elem)
- def test_master_copy(self):
- # Construct a master template
+ def test_main_copy(self):
+ # Construct a main template
elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
+ tmpl = xmlutil.MainTemplate(elem, 1, nsmap=dict(a='foo'))
- # Give it a slave
- slave = xmlutil.TemplateElement('test')
- tmpl.attach(slave)
+ # Give it a subordinate
+ subordinate = xmlutil.TemplateElement('test')
+ tmpl.attach(subordinate)
# Construct a copy
copy = tmpl.copy()
@@ -600,43 +600,43 @@ def test_master_copy(self):
self.assertEqual(tmpl.root, copy.root)
self.assertEqual(tmpl.version, copy.version)
self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
- self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
- self.assertEqual(len(tmpl.slaves), len(copy.slaves))
- self.assertEqual(tmpl.slaves[0], copy.slaves[0])
+ self.assertNotEqual(id(tmpl.subordinates), id(copy.subordinates))
+ self.assertEqual(len(tmpl.subordinates), len(copy.subordinates))
+ self.assertEqual(tmpl.subordinates[0], copy.subordinates[0])
- def test_slave_apply(self):
- # Construct a master template
+ def test_subordinate_apply(self):
+ # Construct a main template
elem = xmlutil.TemplateElement('test')
- master = xmlutil.MasterTemplate(elem, 3)
+ main = xmlutil.MainTemplate(elem, 3)
- # Construct a slave template with applicable minimum version
- slave = xmlutil.SlaveTemplate(elem, 2)
- self.assertEqual(slave.apply(master), True)
- self.assertTrue(repr(slave))
+ # Construct a subordinate template with applicable minimum version
+ subordinate = xmlutil.SubordinateTemplate(elem, 2)
+ self.assertEqual(subordinate.apply(main), True)
+ self.assertTrue(repr(subordinate))
- # Construct a slave template with equal minimum version
- slave = xmlutil.SlaveTemplate(elem, 3)
- self.assertEqual(slave.apply(master), True)
+ # Construct a subordinate template with equal minimum version
+ subordinate = xmlutil.SubordinateTemplate(elem, 3)
+ self.assertEqual(subordinate.apply(main), True)
- # Construct a slave template with inapplicable minimum version
- slave = xmlutil.SlaveTemplate(elem, 4)
- self.assertEqual(slave.apply(master), False)
+ # Construct a subordinate template with inapplicable minimum version
+ subordinate = xmlutil.SubordinateTemplate(elem, 4)
+ self.assertEqual(subordinate.apply(main), False)
- # Construct a slave template with applicable version range
- slave = xmlutil.SlaveTemplate(elem, 2, 4)
- self.assertEqual(slave.apply(master), True)
+ # Construct a subordinate template with applicable version range
+ subordinate = xmlutil.SubordinateTemplate(elem, 2, 4)
+ self.assertEqual(subordinate.apply(main), True)
- # Construct a slave template with low version range
- slave = xmlutil.SlaveTemplate(elem, 1, 2)
- self.assertEqual(slave.apply(master), False)
+ # Construct a subordinate template with low version range
+ subordinate = xmlutil.SubordinateTemplate(elem, 1, 2)
+ self.assertEqual(subordinate.apply(main), False)
- # Construct a slave template with high version range
- slave = xmlutil.SlaveTemplate(elem, 4, 5)
- self.assertEqual(slave.apply(master), False)
+ # Construct a subordinate template with high version range
+ subordinate = xmlutil.SubordinateTemplate(elem, 4, 5)
+ self.assertEqual(subordinate.apply(main), False)
- # Construct a slave template with matching version range
- slave = xmlutil.SlaveTemplate(elem, 3, 3)
- self.assertEqual(slave.apply(master), True)
+ # Construct a subordinate template with matching version range
+ subordinate = xmlutil.SubordinateTemplate(elem, 3, 3)
+ self.assertEqual(subordinate.apply(main), True)
def test__serialize(self):
# Our test object to serialize
@@ -657,7 +657,7 @@ def test__serialize(self):
},
}
- # Set up our master template
+ # Set up our main template
root = xmlutil.TemplateElement('test', selector='test',
name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
@@ -665,22 +665,22 @@ def test__serialize(self):
attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
key=0, value=1)
- master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
+ main = xmlutil.MainTemplate(root, 1, nsmap=dict(f='foo'))
- # Set up our slave template
- root_slave = xmlutil.TemplateElement('test', selector='test')
- image = xmlutil.SubTemplateElement(root_slave, 'image',
+ # Set up our subordinate template
+ root_subordinate = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_subordinate, 'image',
selector='image', id='id')
image.text = xmlutil.Selector('name')
- slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
+ subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1, nsmap=dict(b='bar'))
- # Attach the slave to the master...
- master.attach(slave)
+ # Attach the subordinate to the main...
+ main.attach(subordinate)
# Try serializing our object
- siblings = master._siblings()
- nsmap = master._nsmap()
- result = master._serialize(None, obj, siblings, nsmap)
+ siblings = main._siblings()
+ nsmap = main._nsmap()
+ result = main._serialize(None, obj, siblings, nsmap)
# Now we get to manually walk the element tree...
self.assertEqual(result.tag, 'test')
@@ -712,14 +712,14 @@ def test_serialize_with_colon_tagname_support(self):
expected_xml = (("\n"
'999'
''))
- # Set up our master template
+ # Set up our main template
root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
colon_ns=True)
value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
colon_ns=True)
value.text = xmlutil.Selector()
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test__serialize_with_empty_datum_selector(self):
@@ -733,76 +733,76 @@ def test__serialize_with_empty_datum_selector(self):
root = xmlutil.TemplateElement('test', selector='test',
name='name')
- master = xmlutil.MasterTemplate(root, 1)
- root_slave = xmlutil.TemplateElement('test', selector='test')
- image = xmlutil.SubTemplateElement(root_slave, 'image',
+ main = xmlutil.MainTemplate(root, 1)
+ root_subordinate = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_subordinate, 'image',
selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
- slave = xmlutil.SlaveTemplate(root_slave, 1)
- master.attach(slave)
+ subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1)
+ main.attach(subordinate)
- siblings = master._siblings()
- result = master._serialize(None, obj, siblings)
+ siblings = main._siblings()
+ result = main._serialize(None, obj, siblings)
self.assertEqual(result.tag, 'test')
self.assertEqual(result[0].tag, 'image')
self.assertEqual(result[0].get('id'), str(obj['test']['image']))
-class MasterTemplateBuilder(xmlutil.TemplateBuilder):
+class MainTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
- return xmlutil.MasterTemplate(elem, 1)
+ return xmlutil.MainTemplate(elem, 1)
-class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
+class SubordinateTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
- return xmlutil.SlaveTemplate(elem, 1)
+ return xmlutil.SubordinateTemplate(elem, 1)
class TemplateBuilderTest(test.NoDBTestCase):
- def test_master_template_builder(self):
+ def test_main_template_builder(self):
# Make sure the template hasn't been built yet
- self.assertIsNone(MasterTemplateBuilder._tmpl)
+ self.assertIsNone(MainTemplateBuilder._tmpl)
# Now, construct the template
- tmpl1 = MasterTemplateBuilder()
+ tmpl1 = MainTemplateBuilder()
# Make sure that there is a template cached...
- self.assertIsNotNone(MasterTemplateBuilder._tmpl)
+ self.assertIsNotNone(MainTemplateBuilder._tmpl)
# Make sure it wasn't what was returned...
- self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
+ self.assertNotEqual(MainTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
- cached = MasterTemplateBuilder._tmpl
- tmpl2 = MasterTemplateBuilder()
- self.assertEqual(MasterTemplateBuilder._tmpl, cached)
+ cached = MainTemplateBuilder._tmpl
+ tmpl2 = MainTemplateBuilder()
+ self.assertEqual(MainTemplateBuilder._tmpl, cached)
# Make sure we're always getting fresh copies
self.assertNotEqual(tmpl1, tmpl2)
# Make sure we can override the copying behavior
- tmpl3 = MasterTemplateBuilder(False)
- self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
+ tmpl3 = MainTemplateBuilder(False)
+ self.assertEqual(MainTemplateBuilder._tmpl, tmpl3)
- def test_slave_template_builder(self):
+ def test_subordinate_template_builder(self):
# Make sure the template hasn't been built yet
- self.assertIsNone(SlaveTemplateBuilder._tmpl)
+ self.assertIsNone(SubordinateTemplateBuilder._tmpl)
# Now, construct the template
- tmpl1 = SlaveTemplateBuilder()
+ tmpl1 = SubordinateTemplateBuilder()
# Make sure there is a template cached...
- self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
+ self.assertIsNotNone(SubordinateTemplateBuilder._tmpl)
# Make sure it was what was returned...
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+ self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
- tmpl2 = SlaveTemplateBuilder()
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+ tmpl2 = SubordinateTemplateBuilder()
+ self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1)
# Make sure we're always getting the cached copy
self.assertEqual(tmpl1, tmpl2)
@@ -828,7 +828,7 @@ def test_make_flat_dict(self):
expected_xml = ("\n"
'foobar')
root = xmlutil.make_flat_dict('wrapper')
- tmpl = xmlutil.MasterTemplate(root, 1)
+ tmpl = xmlutil.MainTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
@@ -836,7 +836,7 @@ def test_make_flat_dict(self):
'foobar'
"")
root = xmlutil.make_flat_dict('wrapper', ns='ns')
- tmpl = xmlutil.MasterTemplate(root, 1)
+ tmpl = xmlutil.MainTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
@@ -846,10 +846,10 @@ def test_make_flat_dict_with_colon_tagname_support(self):
expected_xml = (("\n"
'999'
''))
- # Set up our master template
+ # Set up our main template
root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_make_flat_dict_with_parent(self):
@@ -866,8 +866,8 @@ def test_make_flat_dict_with_parent(self):
root.set('id')
extra = xmlutil.make_flat_dict('extra_info', root=root)
root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_make_flat_dict_with_dicts(self):
@@ -884,8 +884,8 @@ def test_make_flat_dict_with_dicts(self):
ignore_sub_dicts=True)
extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_safe_parse_xml(self):
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 950b8c2c4..ae18181c2 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -5418,7 +5418,7 @@ def test_get_instance_nw_info(self):
db.instance_get_by_uuid(self.context, fake_inst['uuid'],
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.compute.network_api.get_instance_nw_info(self.context,
mox.IsA(instance_obj.Instance)).AndReturn(fake_nw_info)
@@ -5449,13 +5449,13 @@ def test_heal_instance_info_cache(self):
'get_nw_info': 0, 'expected_instance': None}
def fake_instance_get_all_by_host(context, host,
- columns_to_join, use_slave=False):
+ columns_to_join, use_subordinate=False):
call_info['get_all_by_host'] += 1
self.assertEqual([], columns_to_join)
return instances[:]
def fake_instance_get_by_uuid(context, instance_uuid,
- columns_to_join, use_slave=False):
+ columns_to_join, use_subordinate=False):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
@@ -5463,7 +5463,7 @@ def fake_instance_get_by_uuid(context, instance_uuid,
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
- def fake_get_instance_nw_info(context, instance, use_slave=False):
+ def fake_get_instance_nw_info(context, instance, use_subordinate=False):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
@@ -5527,7 +5527,7 @@ def test_poll_rescued_instances(self):
unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
def fake_instance_get_all_by_filters(context, filters,
- columns_to_join, use_slave=False):
+ columns_to_join, use_subordinate=False):
self.assertEqual(columns_to_join, [])
return instances
@@ -5586,7 +5586,7 @@ def test_poll_unconfirmed_resizes(self):
migrations.append(fake_mig)
def fake_instance_get_by_uuid(context, instance_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertIn('metadata', columns_to_join)
self.assertIn('system_metadata', columns_to_join)
# raise InstanceNotFound exception for uuid 'noexist'
@@ -5597,7 +5597,7 @@ def fake_instance_get_by_uuid(context, instance_uuid,
return instance
def fake_migration_get_unconfirmed_by_dest_compute(context,
- resize_confirm_window, dest_compute, use_slave=False):
+ resize_confirm_window, dest_compute, use_subordinate=False):
self.assertEqual(dest_compute, CONF.host)
return migrations
@@ -5719,7 +5719,7 @@ def test_instance_build_timeout_mixed_instances(self):
sort_dir,
marker=None,
columns_to_join=[],
- use_slave=True,
+ use_subordinate=True,
limit=None)
self.assertThat(conductor_instance_update.mock_calls,
testtools_matchers.HasLength(len(old_instances)))
@@ -6073,7 +6073,7 @@ def test_reclaim_queued_deletes_continue_on_error(self):
instance_obj.InstanceList.get_by_filters(
ctxt, mox.IgnoreArg(),
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
- use_slave=True
+ use_subordinate=True
).AndReturn(instances)
# The first instance delete fails.
@@ -6113,12 +6113,12 @@ def test_sync_power_states(self):
{'state': power_state.RUNNING})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.RUNNING,
- use_slave=True)
+ use_subordinate=True)
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.SHUTDOWN})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.SHUTDOWN,
- use_slave=True)
+ use_subordinate=True)
self.mox.ReplayAll()
self.compute._sync_power_states(ctxt)
@@ -7047,7 +7047,7 @@ def test_get(self):
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
@@ -7068,7 +7068,7 @@ def test_get_with_admin_context(self):
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(context, instance_uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
@@ -9183,7 +9183,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="host",
- aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
+ aggregate=jsonutils.to_primitive(self.aggr), subordinate_info=None)
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
@@ -9197,36 +9197,36 @@ def fake_driver_remove_from_aggregate(context, aggregate, host,
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="host",
- slave_info=None)
+ subordinate_info=None)
self.assertTrue(fake_driver_remove_from_aggregate.called)
- def test_add_aggregate_host_passes_slave_info_to_driver(self):
+ def test_add_aggregate_host_passes_subordinate_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+ self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="the_host",
- slave_info="SLAVE_INFO",
+ subordinate_info="SLAVE_INFO",
aggregate=jsonutils.to_primitive(self.aggr))
- def test_remove_from_aggregate_passes_slave_info_to_driver(self):
+ def test_remove_from_aggregate_passes_subordinate_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+ self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info"))
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
- slave_info="SLAVE_INFO")
+ subordinate_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
index 639047760..d02c81cba 100644
--- a/nova/tests/compute/test_compute_mgr.py
+++ b/nova/tests/compute/test_compute_mgr.py
@@ -161,7 +161,7 @@ def _do_mock_calls(defer_iptables_apply):
context.get_admin_context().AndReturn(fake_context)
db.instance_get_all_by_host(
fake_context, our_host, columns_to_join=['info_cache'],
- use_slave=False
+ use_subordinate=False
).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
@@ -228,7 +228,7 @@ def test_init_host_with_deleted_migration(self):
context.get_admin_context().AndReturn(fake_context)
db.instance_get_all_by_host(fake_context, our_host,
columns_to_join=['info_cache'],
- use_slave=False
+ use_subordinate=False
).AndReturn([])
self.compute.init_virt_events()
@@ -459,7 +459,7 @@ def test_get_instances_on_driver(self):
inst in driver_instances]},
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
- use_slave=False).AndReturn(
+ use_subordinate=False).AndReturn(
driver_instances)
self.mox.ReplayAll()
@@ -500,7 +500,7 @@ def test_get_instances_on_driver_fallback(self):
fake_context, filters,
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
- use_slave=False).AndReturn(all_instances)
+ use_subordinate=False).AndReturn(all_instances)
self.mox.ReplayAll()
@@ -542,7 +542,7 @@ def _get_sync_instance(self, power_state, vm_state, task_state=None):
def test_sync_instance_power_state_match(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
- instance.refresh(use_slave=False)
+ instance.refresh(use_subordinate=False)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.RUNNING)
@@ -550,7 +550,7 @@ def test_sync_instance_power_state_match(self):
def test_sync_instance_power_state_running_stopped(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
- instance.refresh(use_slave=False)
+ instance.refresh(use_subordinate=False)
instance.save()
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
@@ -560,7 +560,7 @@ def test_sync_instance_power_state_running_stopped(self):
def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
stop=True, force=False):
instance = self._get_sync_instance(power_state, vm_state)
- instance.refresh(use_slave=False)
+ instance.refresh(use_subordinate=False)
instance.save()
self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
diff --git a/nova/tests/compute/test_compute_xen.py b/nova/tests/compute/test_compute_xen.py
index 52cff5042..34ca9964a 100644
--- a/nova/tests/compute/test_compute_xen.py
+++ b/nova/tests/compute/test_compute_xen.py
@@ -51,7 +51,7 @@ def test_sync_power_states_instance_not_found(self):
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
instance_obj.InstanceList.get_by_host(ctxt,
- self.compute.host, use_slave=True).AndReturn(instance_list)
+ self.compute.host, use_subordinate=True).AndReturn(instance_list)
self.compute.driver.get_num_instances().AndReturn(1)
vm_utils.lookup(self.compute.driver._session, instance['name'],
False).AndReturn(None)
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index e48810eaa..85e673aa3 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -94,13 +94,13 @@ def _test_compute_api(self, method, rpc_method, **kwargs):
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={})
+ subordinate_info={})
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={}, version='2.14')
+ subordinate_info={}, version='2.14')
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
@@ -539,13 +539,13 @@ def test_refresh_security_group_members(self):
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={})
+ subordinate_info={})
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={}, version='2.15')
+ subordinate_info={}, version='2.15')
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index 1ce287f7b..cfb274cdb 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -1209,8 +1209,8 @@ def test_security_group_get_no_instances(self):
session = get_session()
self.mox.StubOutWithMock(sqlalchemy_api, 'get_session')
- sqlalchemy_api.get_session(slave_session=False).AndReturn(session)
- sqlalchemy_api.get_session(slave_session=False).AndReturn(session)
+ sqlalchemy_api.get_session(subordinate_session=False).AndReturn(session)
+ sqlalchemy_api.get_session(subordinate_session=False).AndReturn(session)
self.mox.ReplayAll()
security_group = db.security_group_get(self.ctxt, sid,
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 41be0bf41..597d1890b 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -257,7 +257,7 @@ def setUp(self):
self.context = context.RequestContext('testuser', 'testproject',
is_admin=True)
- def get_vifs(_context, instance_uuid, use_slave):
+ def get_vifs(_context, instance_uuid, use_subordinate):
return [vif for vif in vifs if vif['instance_uuid'] ==
instance_uuid]
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 482adf172..44d0d29b1 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -387,7 +387,7 @@ def test_add_fixed_ip_instance_using_id_without_vpn(self):
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -438,7 +438,7 @@ def test_add_fixed_ip_instance_using_uuid_without_vpn(self):
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -533,7 +533,7 @@ def test_instance_dns(self):
fixed_ips=mox.IgnoreArg()).AndReturn(None)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -630,7 +630,7 @@ def test_vpn_allocate_fixed_ip(self):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -678,7 +678,7 @@ def test_allocate_fixed_ip(self):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1220,7 +1220,7 @@ def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1496,7 +1496,7 @@ def test_get_instance_nw_info_client_exceptions(self):
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID,
- use_slave=False).AndRaise(exception.InstanceNotFound(
+ use_subordinate=False).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py
index 3c914c883..afad2d05c 100644
--- a/nova/tests/objects/test_instance.py
+++ b/nova/tests/objects/test_instance.py
@@ -104,7 +104,7 @@ def test_get_without_expected(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, 'uuid',
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, 'uuid',
@@ -123,7 +123,7 @@ def test_get_with_expected(self):
db.instance_get_by_uuid(
self.context, 'uuid',
columns_to_join=exp_cols,
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
fake_faults = test_instance_fault.fake_faults
db.instance_fault_get_by_instance_uuids(
@@ -155,13 +155,13 @@ def test_load(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
fake_inst2 = dict(self.fake_instance,
system_metadata=[{'key': 'foo', 'value': 'bar'}])
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['system_metadata'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst2)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -186,7 +186,7 @@ def test_get_remote(self):
db.instance_get_by_uuid(self.context, 'fake-uuid',
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
@@ -204,13 +204,13 @@ def test_refresh(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(dict(self.fake_instance,
host='orig-host'))
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(dict(self.fake_instance,
host='new-host'))
self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
@@ -232,7 +232,7 @@ def test_refresh_does_not_recurse(self):
self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
expected_attrs=['metadata'],
- use_slave=False
+ use_subordinate=False
).AndReturn(inst_copy)
self.mox.ReplayAll()
self.assertRaises(exception.OrphanedObjectError, inst.refresh)
@@ -279,7 +279,7 @@ def _save_test_helper(self, cell_type, save_kwargs):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
@@ -359,7 +359,7 @@ def test_save_rename_sends_notification(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates, update_cells=False,
@@ -372,7 +372,7 @@ def test_save_rename_sends_notification(self):
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
- use_slave=False)
+ use_subordinate=False)
self.assertEqual('hello', inst.display_name)
inst.display_name = 'goodbye'
inst.save()
@@ -386,7 +386,7 @@ def test_get_deleted(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -400,7 +400,7 @@ def test_get_not_cleaned(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -414,7 +414,7 @@ def test_get_cleaned(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -438,7 +438,7 @@ def test_with_info_cache(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
db.instance_info_cache_update(self.context, fake_uuid,
{'network_info': nwinfo2_json})
@@ -455,7 +455,7 @@ def test_with_info_cache_none(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -481,7 +481,7 @@ def test_with_security_groups(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
db.security_group_update(self.context, 1, {'description': 'changed'}
).AndReturn(fake_inst['security_groups'][0])
@@ -506,7 +506,7 @@ def test_with_empty_security_groups(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -518,7 +518,7 @@ def test_with_empty_pci_devices(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -564,7 +564,7 @@ def test_with_pci_devices(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -582,7 +582,7 @@ def test_with_fault(self):
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
db.instance_fault_get_by_instance_uuids(
self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
@@ -854,11 +854,11 @@ def test_get_all_by_filters(self):
db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
'asc', limit=None, marker=None,
columns_to_join=['metadata'],
- use_slave=False).AndReturn(fakes)
+ use_subordinate=False).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
+ expected_attrs=['metadata'], use_subordinate=False)
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
@@ -875,12 +875,12 @@ def test_get_all_by_filters_works_for_cleaned(self):
{'deleted': True, 'cleaned': False},
'uuid', 'asc', limit=None, marker=None,
columns_to_join=['metadata'],
- use_slave=False).AndReturn(
+ use_subordinate=False).AndReturn(
[fakes[1]])
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_filters(
self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
+ expected_attrs=['metadata'], use_subordinate=False)
self.assertEqual(1, len(inst_list))
self.assertIsInstance(inst_list.objects[0], instance.Instance)
@@ -893,7 +893,7 @@ def test_get_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(self.context, 'foo',
columns_to_join=None,
- use_slave=False).AndReturn(fakes)
+ use_subordinate=False).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
for i in range(0, len(fakes)):
@@ -981,7 +981,7 @@ def test_with_fault(self):
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_all_by_host(self.context, 'host',
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_insts)
db.instance_fault_get_by_instance_uuids(
self.context, [x['uuid'] for x in fake_insts]
@@ -989,7 +989,7 @@ def test_with_fault(self):
self.mox.ReplayAll()
instances = instance.InstanceList.get_by_host(self.context, 'host',
expected_attrs=['fault'],
- use_slave=False)
+ use_subordinate=False)
self.assertEqual(2, len(instances))
self.assertEqual(fake_faults['fake-uuid'][0],
dict(instances[0].fault.iteritems()))
diff --git a/nova/tests/objects/test_migration.py b/nova/tests/objects/test_migration.py
index 08f0f1af8..0b59c7d26 100644
--- a/nova/tests/objects/test_migration.py
+++ b/nova/tests/objects/test_migration.py
@@ -102,7 +102,7 @@ def test_instance(self):
db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
mig = migration.Migration._from_db_object(ctxt,
migration.Migration(),
@@ -119,11 +119,11 @@ def test_get_unconfirmed_by_dest_compute(self):
db, 'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(
ctxt, 'window', 'foo',
- use_slave=False).AndReturn(db_migrations)
+ use_subordinate=False).AndReturn(db_migrations)
self.mox.ReplayAll()
migrations = (
migration.MigrationList.get_unconfirmed_by_dest_compute(
- ctxt, 'window', 'foo', use_slave=False))
+ ctxt, 'window', 'foo', use_subordinate=False))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
index 574242fc7..8b2d43bf9 100644
--- a/nova/tests/virt/libvirt/test_libvirt.py
+++ b/nova/tests/virt/libvirt/test_libvirt.py
@@ -4439,7 +4439,7 @@ def _test_destroy_removes_disk(self, volume_fail=False):
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(instance)
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
@@ -4546,7 +4546,7 @@ def test_delete_instance_files(self):
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(instance)
os.path.exists(mox.IgnoreArg()).AndReturn(False)
os.path.exists(mox.IgnoreArg()).AndReturn(True)
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 1104c467f..f1cd82e89 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -1428,7 +1428,7 @@ def test_uuid_find(self):
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None,
- use_slave=False
+ use_subordinate=False
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
@@ -1445,7 +1445,7 @@ def fake_aggregate_get_by_host(self, *args, **kwargs):
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
- self.stubs.Set(self.conn._session, "is_slave", True)
+ self.stubs.Set(self.conn._session, "is_subordinate", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
@@ -2844,7 +2844,7 @@ def setUp(self):
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
- 'master_compute': 'host',
+ 'main_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
@@ -2854,18 +2854,18 @@ def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
- def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
+ def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
+ self.assertEqual("SLAVEINFO", subordinate_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
+ subordinate_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
@@ -2874,18 +2874,18 @@ def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
- slave_info=None):
+ subordinate_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
+ self.assertEqual("SLAVEINFO", subordinate_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
+ subordinate_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
@@ -2901,11 +2901,11 @@ def fake_init_pool(id, name):
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
- def test_join_slave(self):
- # Ensure join_slave gets called when the request gets to master.
- def fake_join_slave(id, compute_uuid, host, url, user, password):
- fake_join_slave.called = True
- self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
+ def test_join_subordinate(self):
+ # Ensure join_subordinate gets called when the request gets to main.
+ def fake_join_subordinate(id, compute_uuid, host, url, user, password):
+ fake_join_subordinate.called = True
+ self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -2915,7 +2915,7 @@ def fake_join_slave(id, compute_uuid, host, url, user, password):
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
- self.assertTrue(fake_join_slave.called)
+ self.assertTrue(fake_join_subordinate.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
@@ -2955,19 +2955,19 @@ def test_remove_from_empty_aggregate(self):
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
- def test_remove_slave(self):
- # Ensure eject slave gets called.
- def fake_eject_slave(id, compute_uuid, host_uuid):
- fake_eject_slave.called = True
- self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
+ def test_remove_subordinate(self):
+ # Ensure eject subordinate gets called.
+ def fake_eject_subordinate(id, compute_uuid, host_uuid):
+ fake_eject_subordinate.called = True
+ self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
- self.assertTrue(fake_eject_slave.called)
+ self.assertTrue(fake_eject_subordinate.called)
- def test_remove_master_solo(self):
+ def test_remove_main_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
@@ -2982,8 +2982,8 @@ def fake_clear_pool(id):
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
- def test_remote_master_non_empty_pool(self):
- # Ensure AggregateError is raised if removing the master.
+ def test_remote_main_non_empty_pool(self):
+ # Ensure AggregateError is raised if removing the main.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -3091,7 +3091,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=jsonutils.to_primitive(self.aggr),
- slave_info=None)
+ subordinate_info=None)
excepted = db.aggregate_get(self.context, self.aggr['id'])
self.assertEqual(excepted['metadetails'][pool_states.KEY],
pool_states.ERROR)
@@ -3103,16 +3103,16 @@ def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
- host_param, host, slave_info):
+ host_param, host, subordinate_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
- host_param, host, slave_info))
+ host_param, host, subordinate_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
- host, slave_info):
+ host, subordinate_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
- host_param, host, slave_info))
+ host_param, host, subordinate_info))
class StubDependencies(object):
@@ -3127,10 +3127,10 @@ def _is_hv_pool(self, *_ignore):
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
- 'master_compute': 'master'
+ 'main_compute': 'main'
}
- def _create_slave_info(self, *ignore):
+ def _create_subordinate_info(self, *ignore):
return "SLAVE_INFO"
@@ -3144,32 +3144,32 @@ class HypervisorPoolTestCase(test.NoDBTestCase):
'id': 98,
'hosts': [],
'metadata': {
- 'master_compute': 'master',
+ 'main_compute': 'main',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
- def test_slave_asks_master_to_add_slave_to_pool(self):
- slave = ResourcePoolWithStubs()
+ def test_subordinate_asks_main_to_add_subordinate_to_pool(self):
+ subordinate = ResourcePoolWithStubs()
- slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
+ subordinate.add_to_aggregate("CONTEXT", self.fake_aggregate, "subordinate")
self.assertIn(
- (slave.compute_rpcapi.add_aggregate_host,
+ (subordinate.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
- "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
+ "subordinate", "main", "SLAVE_INFO"),
+ subordinate.compute_rpcapi._mock_calls)
- def test_slave_asks_master_to_remove_slave_from_pool(self):
- slave = ResourcePoolWithStubs()
+ def test_subordinate_asks_main_to_remove_subordinate_from_pool(self):
+ subordinate = ResourcePoolWithStubs()
- slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
+ subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate")
self.assertIn(
- (slave.compute_rpcapi.remove_aggregate_host,
- "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
+ (subordinate.compute_rpcapi.remove_aggregate_host,
+ "CONTEXT", 98, "subordinate", "main", "SLAVE_INFO"),
+ subordinate.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
diff --git a/nova/virt/xenapi/client/session.py b/nova/virt/xenapi/client/session.py
index 97f2846e1..5d1771e3d 100644
--- a/nova/virt/xenapi/client/session.py
+++ b/nova/virt/xenapi/client/session.py
@@ -65,7 +65,7 @@ def __init__(self, url, user, pw):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
- self.is_slave = False
+ self.is_subordinate = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
@@ -93,13 +93,13 @@ def _create_first_session(self, url, user, pw, exception):
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure as e:
- # if user and pw of the master are different, we're doomed!
+ # if user and pw of the main are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
- master = e.details[1]
- url = pool.swap_xapi_host(url, master)
+ main = e.details[1]
+ url = pool.swap_xapi_host(url, main)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
- self.is_slave = True
+ self.is_subordinate = True
else:
raise
self._sessions.put(session)
@@ -113,7 +113,7 @@ def _populate_session_pool(self, url, user, pw, exception):
self._sessions.put(session)
def _get_host_uuid(self):
- if self.is_slave:
+ if self.is_subordinate:
aggr = aggregate_obj.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index ece9b0845..b776a0b9d 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -113,7 +113,7 @@ def create_host(name_label, hostname='fake_name', address='fake_addr'):
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
- _db_content['pool'][pool_ref]['master'] = host_ref
+ _db_content['pool'][pool_ref]['main'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
_db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
@@ -805,7 +805,7 @@ def __getattr__(self, name):
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
- elif name.startswith('login') or name.startswith('slave_local'):
+ elif name.startswith('login') or name.startswith('subordinate_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 490675579..20a3baaaf 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -71,7 +71,7 @@ def undo_aggregate_operation(self, context, op, aggregate,
'during operation on %(host)s'),
{'aggregate_id': aggregate['id'], 'host': host})
- def add_to_aggregate(self, context, aggregate, host, slave_info=None):
+ def add_to_aggregate(self, context, aggregate, host, subordinate_info=None):
"""Add a compute host to an aggregate."""
if not pool_states.is_hv_pool(aggregate['metadata']):
return
@@ -89,38 +89,38 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None):
if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED):
aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
if len(aggregate['hosts']) == 1:
- # this is the first host of the pool -> make it master
+ # this is the first host of the pool -> make it main
self._init_pool(aggregate['id'], aggregate['name'])
- # save metadata so that we can find the master again
- metadata = {'master_compute': host,
+ # save metadata so that we can find the main again
+ metadata = {'main_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
aggregate.update_metadata(metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
- master_compute = aggregate['metadata']['master_compute']
- if master_compute == CONF.host and master_compute != host:
- # this is the master -> do a pool-join
- # To this aim, nova compute on the slave has to go down.
+ main_compute = aggregate['metadata']['main_compute']
+ if main_compute == CONF.host and main_compute != host:
+ # this is the main -> do a pool-join
+ # To this aim, nova compute on the subordinate has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
- self._join_slave(aggregate['id'], host,
- slave_info.get('compute_uuid'),
- slave_info.get('url'), slave_info.get('user'),
- slave_info.get('passwd'))
- metadata = {host: slave_info.get('xenhost_uuid'), }
+ self._join_subordinate(aggregate['id'], host,
+ subordinate_info.get('compute_uuid'),
+ subordinate_info.get('url'), subordinate_info.get('user'),
+ subordinate_info.get('passwd'))
+ metadata = {host: subordinate_info.get('xenhost_uuid'), }
aggregate.update_metadata(metadata)
- elif master_compute and master_compute != host:
- # send rpc cast to master, asking to add the following
+ elif main_compute and main_compute != host:
+ # send rpc cast to main, asking to add the following
# host with specified credentials.
- slave_info = self._create_slave_info()
+ subordinate_info = self._create_subordinate_info()
self.compute_rpcapi.add_aggregate_host(
- context, aggregate, host, master_compute, slave_info)
+ context, aggregate, host, main_compute, subordinate_info)
- def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
+ def remove_from_aggregate(self, context, aggregate, host, subordinate_info=None):
"""Remove a compute host from an aggregate."""
- slave_info = slave_info or dict()
+ subordinate_info = subordinate_info or dict()
if not pool_states.is_hv_pool(aggregate['metadata']):
return
@@ -133,19 +133,19 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
aggregate_id=aggregate['id'],
reason=invalid[aggregate['metadata'][pool_states.KEY]])
- master_compute = aggregate['metadata']['master_compute']
- if master_compute == CONF.host and master_compute != host:
- # this is the master -> instruct it to eject a host from the pool
+ main_compute = aggregate['metadata']['main_compute']
+ if main_compute == CONF.host and main_compute != host:
+ # this is the main -> instruct it to eject a host from the pool
host_uuid = aggregate['metadata'][host]
- self._eject_slave(aggregate['id'],
- slave_info.get('compute_uuid'), host_uuid)
+ self._eject_subordinate(aggregate['id'],
+ subordinate_info.get('compute_uuid'), host_uuid)
aggregate.update_metadata({host: None})
- elif master_compute == host:
- # Remove master from its own pool -> destroy pool only if the
- # master is on its own, otherwise raise fault. Destroying a
- # pool made only by master is fictional
+ elif main_compute == host:
+ # Remove main from its own pool -> destroy pool only if the
+ # main is on its own, otherwise raise fault. Destroying a
+ # pool made only by main is fictional
if len(aggregate['hosts']) > 1:
- # NOTE: this could be avoided by doing a master
+ # NOTE: this could be avoided by doing a main
# re-election, but this is simpler for now.
raise exception.InvalidAggregateAction(
aggregate_id=aggregate['id'],
@@ -154,32 +154,32 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
'from the pool; pool not empty')
% host)
self._clear_pool(aggregate['id'])
- aggregate.update_metadata({'master_compute': None, host: None})
- elif master_compute and master_compute != host:
- # A master exists -> forward pool-eject request to master
- slave_info = self._create_slave_info()
+ aggregate.update_metadata({'main_compute': None, host: None})
+ elif main_compute and main_compute != host:
+ # A main exists -> forward pool-eject request to main
+ subordinate_info = self._create_subordinate_info()
self.compute_rpcapi.remove_aggregate_host(
- context, aggregate['id'], host, master_compute, slave_info)
+ context, aggregate['id'], host, main_compute, subordinate_info)
else:
# this shouldn't have happened
raise exception.AggregateError(aggregate_id=aggregate['id'],
action='remove_from_aggregate',
reason=_('Unable to eject %s '
- 'from the pool; No master found')
+ 'from the pool; No main found')
% host)
- def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
- """Joins a slave into a XenServer resource pool."""
+ def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd):
+ """Joins a subordinate into a XenServer resource pool."""
try:
args = {'compute_uuid': compute_uuid,
'url': url,
'user': user,
'password': passwd,
'force': jsonutils.dumps(CONF.xenserver.use_join_force),
'master_addr': self._host_addr,
'master_user': CONF.xenserver.connection_username,
'master_pass': CONF.xenserver.connection_password, }
self._session.call_plugin('xenhost', 'host_join', args)
except self._session.XenAPI.Failure as e:
LOG.error(_("Pool-Join failed: %s"), e)
@@ -188,8 +188,8 @@ def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
reason=_('Unable to join %s '
'in the pool') % host)
- def _eject_slave(self, aggregate_id, compute_uuid, host_uuid):
- """Eject a slave from a XenServer resource pool."""
+ def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid):
+ """Eject a subordinate from a XenServer resource pool."""
try:
# shutdown nova-compute; if there are other VMs running, e.g.
# guest instances, the eject will fail. That's a precaution
@@ -229,7 +229,7 @@ def _clear_pool(self, aggregate_id):
action='remove_from_aggregate',
reason=str(e.details))
- def _create_slave_info(self):
+ def _create_subordinate_info(self):
"""XenServer specific info needed to join the hypervisor pool."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index ae431ddec..f4acdf541 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -25,7 +25,7 @@
A 'created' pool becomes 'changing' during the first request of
adding a host. During a 'changing' status no other requests will be accepted;
this is to allow the hypervisor layer to instantiate the underlying pool
-without any potential race condition that may incur in master/slave-based
+without any potential race condition that may incur in main/subordinate-based
configurations. The pool goes into the 'active' state when the underlying
pool has been correctly instantiated.
All other operations (e.g. add/remove hosts) that succeed will keep the
diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py
index fdcad3114..829de014e 100755
--- a/tools/db/schema_diff.py
+++ b/tools/db/schema_diff.py
@@ -26,7 +26,7 @@
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
./tools/db/schema_diff.py mysql master:latest my_branch:82
"""
from __future__ import print_function
@@ -216,12 +216,12 @@ def parse_options():
try:
orig_branch, orig_version = sys.argv[2].split(':')
except IndexError:
usage('original branch and version required (e.g. master:82)')
try:
new_branch, new_version = sys.argv[3].split(':')
except IndexError:
usage('new branch and version required (e.g. master:82)')
return db_type, orig_branch, orig_version, new_branch, new_version