diff --git a/README.md b/README.md
index 6797cdd..dccfb03 100644
--- a/README.md
+++ b/README.md
@@ -20,9 +20,11 @@ Quick Start instructions for KVM deployments:
To get started, you need a server running Ubuntu 14.04 (or some similar flavor) with libvirt, kvm and a few python tools.
+You can use the various ansible-playbooks in the extras folder to get an install up and running quickly.
+
Install the required Ubuntu packages-
```
-root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools
+root@wistar-build:~# apt-get install python-pip python-dev build-essential qemu-kvm libz-dev libvirt-bin socat python-pexpect python-libvirt libxml2-dev libxslt1-dev unzip bridge-utils genisoimage python-netaddr libffi-dev libssl-dev python-markupsafe libxml2-dev libxslt1-dev git mtools dosfstools python-openstacksdk
```
Install Python packages-
diff --git a/ajax/templates/ajax/openstackDeploymentStatus.html b/ajax/templates/ajax/openstackDeploymentStatus.html
index 63feee1..0a0c03e 100644
--- a/ajax/templates/ajax/openstackDeploymentStatus.html
+++ b/ajax/templates/ajax/openstackDeploymentStatus.html
@@ -57,9 +57,34 @@
{{ resource.resource_name }}
-
- {% if 'COMPLETE' in resource.resource_status %}
- ✓
+ |
+ {% if 'COMPLETE' in resource.resource_status and resource.physical_status == "ACTIVE" %}
+
+
+
+
+
+
+
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "SHUTOFF" %}
+
+
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == "REBOOT" %}
+ ↻
+
+ {% elif 'COMPLETE' in resource.resource_status and resource.physical_status == None %}
+ ✓
{% else %}
▽
diff --git a/ajax/urls.py b/ajax/urls.py
index 59520b4..16a2444 100644
--- a/ajax/urls.py
+++ b/ajax/urls.py
@@ -49,6 +49,7 @@
url(r'^manageDomain/$', views.manage_domain, name='manageDomain'),
url(r'^manageNetwork/$', views.manage_network, name='manageNetwork'),
url(r'^manageHypervisor/$', views.manage_hypervisor, name='manage_hypervisor'),
+ url(r'^manageInstance/$', views.manage_instance, name="manageInstance"),
url(r'^executeCli/$', views.execute_cli, name='executeCli'),
url(r'^executeLinuxCli/$', views.execute_linux_cli, name='executeLinuxCli'),
url(r'^launchWebConsole/$', views.launch_web_console, name='launchWebConsole'),
diff --git a/ajax/views.py b/ajax/views.py
index e3a0957..2181ae1 100644
--- a/ajax/views.py
+++ b/ajax/views.py
@@ -585,8 +585,27 @@ def refresh_openstack_deployment_status(request, topology_id):
stack_details = openstackUtils.get_stack_details(stack_name)
stack_resources = dict()
logger.debug(stack_details)
- if stack_details is not None and 'stack_status' in stack_details and 'COMPLETE' in stack_details["stack_status"]:
+
+ if stack_details is not None and "stack_status" in stack_details and "COMPLETE" in stack_details["stack_status"]:
stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"])
+            # No attempt is made to get the physical status here, since this is
+            # legacy OpenStack and the relevant field names are unknown
+ for resource in stack_resources:
+ resource["physical_status"] = None
+
+ if stack_details is not None and 'status' in stack_details and 'COMPLETE' in stack_details["status"]:
+        # This fixes compatibility with newer resource responses which have different fields
+        # Simply re-add the data with the old names
+
+ # Also get the physical status
+ stack_resources = openstackUtils.get_stack_resources(stack_name, stack_details["id"], resource_status=True)
+
+ stack_details["stack_status"] = stack_details["status"]
+ stack_details["stack_status_reason"] = stack_details["status_reason"]
+
+ for resource in stack_resources["resources"]:
+ resource["resource_name"] = resource["name"]
+ resource["resource_status"] = resource["status"]
if hasattr(configuration, 'openstack_horizon_url'):
horizon_url = configuration.openstack_horizon_url
@@ -711,6 +730,24 @@ def manage_domain(request):
else:
return render(request, 'ajax/ajaxError.html', {'error': "Unknown Parameters in POST!"})
+def manage_instance(request):
+ """
+ This function manages basic interactions with the OS::Nova::Server
+ resources in the deployed openstack stack
+ The instanceId corresponds to the OS::Nova::Server instance
+ """
+ required_fields = set(['topologyId', 'action', 'instanceId'])
+
+ if not required_fields.issubset(request.POST):
+ return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})
+
+ instance_id = request.POST['instanceId']
+ action = request.POST['action']
+ topology_id = request.POST["topologyId"]
+
+ openstackUtils.manage_instance(instance_id, action)
+
+ return refresh_openstack_deployment_status(request, topology_id)
def manage_network(request):
required_fields = set(['networkName', 'action', 'topologyId'])
@@ -872,6 +909,8 @@ def multi_clone_topology(request):
def redeploy_topology(request):
+
+ logger.debug("---redeploy_topology---")
required_fields = set(['json', 'topologyId'])
if not required_fields.issubset(request.POST):
return render(request, 'ajax/ajaxError.html', {'error': "No Topology Id in request"})
@@ -886,40 +925,50 @@ def redeploy_topology(request):
return render(request, 'ajax/ajaxError.html', {'error': "Topology doesn't exist"})
try:
- domains = libvirtUtils.get_domains_for_topology(topology_id)
- config = wistarUtils.load_config_from_topology_json(topo.json, topology_id)
-
- logger.debug('checking for orphaned domains first')
- # find domains we no longer need
- for d in domains:
- logger.debug('checking domain: %s' % d['name'])
- found = False
- for config_device in config["devices"]:
- if config_device['name'] == d['name']:
- found = True
- continue
+ if configuration.deployment_backend == "openstack":
+ # Updates the stack with the new heat template
+ # Should check first if the stack exists
+ # if the stack doesn't exist, just switch to deployment instead
+ #FIXME
+ update_stack(request, topology_id)
+
+ elif configuration.deployment_backend == "kvm":
+
+ domains = libvirtUtils.get_domains_for_topology(topology_id)
+ config = wistarUtils.load_config_from_topology_json(topo.json, topology_id)
+
+ logger.debug('checking for orphaned domains first')
+ # find domains we no longer need
+ for d in domains:
+ logger.debug('checking domain: %s' % d['name'])
+ found = False
+ for config_device in config["devices"]:
+ if config_device['name'] == d['name']:
+ found = True
+ continue
- if not found:
- logger.info("undefine domain: " + d["name"])
- source_file = libvirtUtils.get_image_for_domain(d["uuid"])
- if libvirtUtils.undefine_domain(d["uuid"]):
- if source_file is not None:
- osUtils.remove_instance(source_file)
+ if not found:
+ logger.info("undefine domain: " + d["name"])
+ source_file = libvirtUtils.get_image_for_domain(d["uuid"])
+ if libvirtUtils.undefine_domain(d["uuid"]):
+ if source_file is not None:
+ osUtils.remove_instance(source_file)
- osUtils.remove_cloud_init_seed_dir_for_domain(d['name'])
+ osUtils.remove_cloud_init_seed_dir_for_domain(d['name'])
except Exception as e:
logger.debug("Caught Exception in redeploy")
logger.debug(str(e))
return render(request, 'ajax/ajaxError.html', {'error': str(e)})
- # forward onto deploy topo
- try:
- inline_deploy_topology(config)
- except Exception as e:
- logger.debug("Caught Exception in inline_deploy")
- logger.debug(str(e))
- return render(request, 'ajax/ajaxError.html', {'error': str(e)})
+    # forward onto deploy topology if this is a kvm topology
+ if configuration.deployment_backend == "kvm":
+ try:
+ inline_deploy_topology(config)
+ except Exception as e:
+ logger.debug("Caught Exception in inline_deploy")
+ logger.debug(str(e))
+ return render(request, 'ajax/ajaxError.html', {'error': str(e)})
return refresh_deployment_status(request)
@@ -1475,6 +1524,7 @@ def deploy_stack(request, topology_id):
except ObjectDoesNotExist:
return render(request, 'error.html', {'error': "Topology not found!"})
+    heat_template = None
try:
# generate a stack name
# FIXME should add a check to verify this is a unique name
@@ -1506,6 +1556,41 @@ def deploy_stack(request, topology_id):
logger.debug(str(e))
return render(request, 'error.html', {'error': str(e)})
+def update_stack(request, topology_id):
+ """
+ Updates an already existing stack with a new template
+ """
+ try:
+ topology = Topology.objects.get(pk=topology_id)
+ except ObjectDoesNotExist:
+ return render(request, 'error.html', {'error': "Topology not found!"})
+ try:
+ stack_name = topology.name.replace(' ', '_')
+ # let's parse the json and convert to simple lists and dicts
+ logger.debug("loading config")
+ config = wistarUtils.load_config_from_topology_json(topology.json, topology_id)
+ logger.debug("Config is loaded")
+ heat_template = wistarUtils.get_heat_json_from_topology_config(config, stack_name)
+ logger.debug("heat template created")
+ if not openstackUtils.connect_to_openstack():
+ return render(request, 'error.html', {'error': "Could not connect to Openstack"})
+
+
+ result = openstackUtils.update_stack(stack_name, heat_template)
+        if result is None:
+ logger.debug("Can't update stack since it doesn't exist, deploying")
+ openstackUtils.create_stack(stack_name, heat_template)
+ else:
+ logger.debug(result)
+
+ return HttpResponseRedirect('/topologies/' + topology_id + '/')
+
+ except Exception as e:
+ logger.debug("Caught Exception in update stack")
+ logger.debug(str(e))
+
+ return render(request, 'error.html', {'error': str(e)})
+
def delete_stack(request, topology_id):
"""
diff --git a/common/lib/openstackUtils.py b/common/lib/openstackUtils.py
index 5c5fe7b..da23a0c 100644
--- a/common/lib/openstackUtils.py
+++ b/common/lib/openstackUtils.py
@@ -1,555 +1,254 @@
-#
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
-#
-# Copyright (c) 2015 Juniper Networks, Inc.
-# All rights reserved.
-#
-# Use is subject to license terms.
-#
-# Licensed under the Apache License, Version 2.0 (the ?License?); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
import json
import logging
import mmap
import time
-import urllib2
-from urllib2 import URLError
from wistar import configuration
-# OpenStack component URLs
-# _glance_url = ':9292/v1'
-_analytics_url = ':8081'
-_api_url = ':8082'
-_os_url = ':5000/v3'
-_nova_url = ':8774/v2'
-_neutron_url = ':9696/v2.0'
-_glance_url = ':9292/v2'
-_heat_url = ':8004/v1'
-_auth_url = _os_url + "/auth/tokens"
-_data_url = ':8143/api/tenant/networking/'
-
-# auth token will get populated by connect on each instantiation
-# and referenced by each subsequent call
-_auth_token = ""
-_project_auth_token = ""
-_tenant_id = ""
-
-_token_cache_time = time.time()
-_project_token_cache_time = time.time()
-
-# cache auth tokens for 1 hour
-_max_cache_time = 3600
+import openstack
+from openstack.config import loader
+from openstack import utils
+from openstack.cloud import OpenStackCloud
+from keystoneauth1.exceptions.http import Unauthorized as ErrorUnauthorized
+
logger = logging.getLogger(__name__)
-def connect_to_openstack():
+def create_connection():
"""
- authenticates to keystone at configuration.openstack_host with OPENSTACK_USER, OPENSTACK_PASS
- will set the _auth_token property on success, which is then used for all subsequent
- calls from this module
- :return: True on successful authentication to keystone, False otherwise
+    Creates a connection object based on the configuration mode
+ Either uses the openstacksdk mode which searches for clouds.yaml
+ Or uses the configuration options
"""
+ if configuration.openstack_mode == "auto":
+ return openstack.connect(cloud=configuration.openstack_cloud)
+ else:
+ return openstack.connect(
+ auth_url=configuration.openstack_host,
+ project_name=configuration.openstack_project,
+ username=configuration.openstack_user,
+ password=configuration.openstack_password,
+ region_name=configuration.openstack_region
+ )
- logger.debug("--- connect_to_openstack ---")
-
- logger.debug('verify configuration')
-
- if not hasattr(configuration, 'openstack_host'):
- logger.error('Openstack Host is not configured')
- return False
-
- if not hasattr(configuration, 'openstack_user'):
- logger.error('Openstack User is not configured')
- return False
-
- if not hasattr(configuration, 'openstack_password'):
- logger.error('Openstack Password is not configured')
- return False
- global _auth_token
- global _tenant_id
- global _token_cache_time
-
- # simple cache calculation
- # _token_cache_time will get updated when we refresh the token
- # so let's find out how long ago that was
- # and if we should refresh again
- now = time.time()
- diff = now - _token_cache_time
-
- if diff < _max_cache_time and _auth_token != "":
- return _auth_token
-
- logger.debug("refreshing auth token")
- _token_cache_time = now
- _auth_token = ""
-
- _auth_json = """
- { "auth": {
- "identity": {
- "methods": ["password"],
- "password": {
- "user": {
- "name": "%s",
- "domain": { "id": "default" },
- "password": "%s"
- }
- }
- },
- "scope": {
- "project": {
- "domain": {
- "id": "default"
- },
- "name": "admin"
- }
- }
- }
- }
- """ % (configuration.openstack_user, configuration.openstack_password)
+def connect_to_openstack():
+ """
+ Tries to connect to the selected openstack cloud
+ """
+ logger.debug("--- connect_to_openstack ---")
+ connection = create_connection()
try:
- _auth_token = ""
- request = urllib2.Request("http://" + configuration.openstack_host + _auth_url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("Content-Length", len(_auth_json))
- result = urllib2.urlopen(request, _auth_json)
- _auth_token = result.info().getheader('X-Subject-Token')
- # now get the tenant_id for the chosen project
- _tenant_id = get_project_id(configuration.openstack_project)
- # logger.debug(_auth_token)
+ connection.authorize()
return True
- except URLError as e:
- logger.error("Could not authenticate to openstack!")
- logger.error("error was %s" % str(e))
+ except ErrorUnauthorized:
return False
+def get_glance_image_list():
+ # logger.debug("--- get_glance_image_list ---")
-def get_project_auth_token(project):
- """
- :param project: project name string
- :return: auth_token specific to this project, None on error
- """
- logger.debug("--- get_project_auth_token ---")
-
- global _project_auth_token
- global _project_token_cache_time
-
- now = time.time()
- diff = now - _project_token_cache_time
-
- if diff < _max_cache_time and _project_auth_token != "":
- return _project_auth_token
-
- logger.debug("refreshing project auth token")
- _project_token_cache_time = now
- _project_auth_token = ""
-
- _auth_json = """
- { "auth": {
- "identity": {
- "methods": ["password"],
- "password": {
- "user": {
- "name": "%s",
- "domain": { "id": "default" },
- "password": "%s"
- }
- }
- },
- "scope": {
- "project": {
- "domain": {
- "id": "default"
- },
- "name": "%s"
- }
- }
- }
- }
- """ % (configuration.openstack_user, configuration.openstack_password, project)
+ connection = create_connection()
- try:
- request = urllib2.Request("http://" + configuration.openstack_host + _auth_url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("Content-Length", len(_auth_json))
- result = urllib2.urlopen(request, _auth_json)
- _project_auth_token = result.info().getheader('X-Subject-Token')
- return _project_auth_token
+ images = connection.image.images()
- except URLError as e:
- logger.error("Could not get project auth token")
- logger.error("error was %s" % str(e))
- return None
+ return [image.to_dict() for image in images if image.status == "active"]
-def get_project_id(project_name):
- """
- Gets the UUID of the project by project_name
- :param project_name: Name of the Project
- :return: string UUID or None
- """
-
- logger.debug("--- get_project_id ---")
+def get_glance_image_detail(image_id):
+ logger.debug("---get_glance_image-detail_by_id")
+ connection = create_connection()
- projects_url = create_os_url('/projects')
- projects_string = do_get(projects_url)
- if projects_string is None:
+ result = connection.image.get_image(image_id)
+ if result is None:
return None
+ return result.to_dict()
- projects = json.loads(projects_string)
- for project in projects["projects"]:
- if project["name"] == project_name:
- return str(project["id"])
-
- return None
-
-
-def get_network_id(network_name):
- """
- Gets the UUID of the network by network_name
- :param network_name: Name of the network
- :return: string UUID or None
- """
-
- logger.debug("--- get_network_id ---")
-
- networks_url = create_neutron_url('/networks?name=%s' % network_name)
- logger.info(networks_url)
- networks_string = do_get(networks_url)
- logger.info(networks_string)
- if networks_string is None:
- logger.error('Did not find a network for that name!')
- return None
+def get_glance_image_detail_by_name(image_name):
+ logger.debug("-- get glance image detail by name")
+ connection = create_connection()
- try:
- networks = json.loads(networks_string)
- except ValueError:
- logger.error('Could not parse json response in get_network_id')
+ result = connection.image.find_image(image_name)
+ if result is None:
return None
+ else:
+ return result.to_dict()
- for network in networks["networks"]:
- if network["name"] == network_name:
- logger.info('Found id!')
- return str(network["id"])
-
- return None
-
-
-def upload_image_to_glance_old(name, image_file_path):
- """
-
- :param name: name of the image to be uploaded
- :param image_file_path: full filesystem path as string to the image
- :return: json encoded results string from glance REST api
- """
- logger.debug("--- upload_image_to_glance ---")
- url = create_glance_url('/images')
- try:
- f = open(image_file_path, 'rb')
- fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
-
- request = urllib2.Request(url, fio)
- request.add_header("X-Auth-Token", _auth_token)
- request.add_header("Content-Type", "application/octet-stream")
- request.add_header("x-image-meta-name", name)
- request.add_header("x-image-meta-disk_format", "qcow2")
- request.add_header("x-image-meta-container_format", "bare")
- request.add_header("x-image-meta-is_public", "true")
- request.add_header("x-image-meta-min_ram", "1024")
- request.add_header("x-image-meta-min_disk", "1")
- result = urllib2.urlopen(request)
- return result.read()
- except Exception as e:
- logger.error("Could not upload image to glance")
- logger.error("error was %s" % str(e))
+def get_image_id_for_name(image_name):
+ connection = create_connection()
- finally:
- fio.close()
- f.close()
+ result = connection.image.find_image(image_name)
+ if result is None:
return None
-
+ else:
+ return result.to_dict()["id"]
def upload_image_to_glance(name, image_file_path):
"""
-
:param name: name of the image to be created
:param image_file_path: path of the file to upload
- :return: json encoded results string from glance REST api
+ :return: json encoded results string
"""
- logger.debug("--- create_image_in_glance ---")
+ #FIXME this is not properly checked yet
+ connection = create_connection()
- url = create_glance_url('/images')
+ image_attrs = dict()
+ image_attrs['disk_format'] = 'qcow2'
+ image_attrs['container_format'] = 'bare'
+ image_attrs['name'] = name
- try:
+ f = open(image_file_path, 'rb')
+ fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
- d = dict()
- d['disk_format'] = 'qcow2'
- d['container_format'] = 'bare'
- d['name'] = name
+ image_attrs['data'] = fio
- r_data = do_post(url, json.dumps(d))
+ connection.images.upload_image(**image_attrs)
- except Exception as e:
- logger.error("Could not upload image to glance")
- logger.error("error was %s" % str(e))
- return None
-
- try:
- r_json = json.loads(r_data)
- if 'id' in r_json:
- image_id = r_json['id']
-
- logger.info('Preparing to push image data to glance!')
- f = open(image_file_path, 'rb')
- fio = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
- upload_url = create_glance_url('/images/%s/file' % image_id)
- request = urllib2.Request(upload_url, fio)
- request.add_header("Content-Type", "application/octet-stream")
- request.add_header("X-Auth-Token", _auth_token)
- request.get_method = lambda: 'PUT'
- return urllib2.urlopen(request)
- else:
- logger.error('Could not find an ID key in returned json from glance image create')
- logger.error(r_data)
- logger.error('returning None')
- return None
-
- except ValueError:
- logger.error('Could not parse JSON return data from glance image create')
- return None
-
-
-def get_neutron_ports_for_network(network_name):
+def manage_instance(instance_id, action):
"""
- :return: json response from /ports URL
+ Some basic interactions with server instances
"""
- logger.debug("--- get_neutron_port_list ---")
- network_id = get_network_id(network_name)
- if network_id is None:
- logger.warn("couldn't find the correct network_id")
- return None
+ conn = create_connection()
- url = create_neutron_url("/ports.json?network_id=%s&fields=id&fields=fixed_ips" % network_id)
- logger.debug(url)
- port_list_string = do_get(url)
- logger.debug(port_list_string)
+ server_instance = conn.compute.get_server(instance_id)
- return port_list_string
+ if action == "stop" and server_instance.status == "ACTIVE":
+ # Only attempt to stop if it's active, don't try to interact with a non stable instance
+ conn.compute.stop_server(instance_id)
+ elif action == "start" and server_instance.status == "SHUTOFF":
+ # Only attempt to start if it's properly shut off
+ conn.compute.start_server(instance_id)
+ elif action == "reboot" and server_instance.status == "ACTIVE":
+ conn.compute.reboot_server(instance_id, "SOFT")
-def get_consumed_management_ips():
- """
- Return a list of dicts of the format
- [
- { "ip-address": "xxx.xxx.xxx.xxx"}
- ]
- This mimics the libvirt dnsmasq format for dhcp reservations
- This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to
- get all reserved management ips
- :return: list of dicts
- """
- consumed_ips = list()
- ports_string = get_neutron_ports_for_network(configuration.openstack_mgmt_network)
- if ports_string is None:
- return consumed_ips
- try:
- ports = json.loads(ports_string)
- except ValueError:
- logger.error('Could not parse json response in get_consumed_management_ips')
- return consumed_ips
-
- if 'ports' not in ports:
- logger.error('unexpected keys in json response!')
- return consumed_ips
-
- for port in ports['ports']:
- for fixed_ip in port['fixed_ips']:
- if configuration.management_prefix in fixed_ip['ip_address']:
- fip = dict()
- fip['ip-address'] = fixed_ip['ip_address']
- consumed_ips.append(fip)
-
- return consumed_ips
-
-
-def get_glance_image_list():
- """
- :return: list of json objects from glance /images URL filtered with only shared or public images
- """
- logger.debug("--- get_glance_image_list ---")
-
- url = create_glance_url("/images")
- image_list_string = do_get(url)
-
- image_list = list()
-
- if image_list_string is None:
- return image_list
-
- try:
- glance_return = json.loads(image_list_string)
- except ValueError:
- logger.warn('Could not parse json response from glance /images')
- return image_list
-
- if 'images' not in glance_return:
- logger.warn('did not find images key in glance return data')
- logger.debug(glance_return)
- return image_list
-
- for im in glance_return['images']:
-
- if 'status' in im and im['status'] != 'active':
- logger.debug('Skipping non-active image %s' % im['name'])
- continue
+def get_nova_flavors(project_name):
+ connection = create_connection()
- if 'visibility' in im and im['visibility'] in ['shared', 'public']:
- image_list.append(im)
+ all_flavors = connection.compute.flavors()
- return image_list
+ flavor_dicts = [flavor.to_dict() for flavor in all_flavors]
+ logger.debug("FLAVORS")
+ logger.debug(str(flavor_dicts))
+
+ return json.dumps(flavor_dicts)
+ # return [flavor.to_dict() for flavor in connection.compute.flavors()]
-def get_glance_image_detail(glance_id):
+def get_nova_serial_console(instance_name):
"""
- :param glance_id: id of the glance image to retrieve
- :return: json response from glance /images/glance_id URL
+ Get the websocket URL for the serial proxy for a given nova server (instance)
+ :param instance_name: name of the instance
+ :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx
"""
- logger.debug("--- get_glance_image_detail ---")
+ #FIXME no proper openstacksdk implementation yet
+ connection = create_connection()
+ server = connection.compute.find_server(instance_name)
- url = create_glance_url("/images/%s" % glance_id)
- image_string = do_get(url)
- if image_string is None:
+    if server is None:
return None
+    # Trying to get the console via a manual query (NOTE(review): this relies on urllib2/URLError, whose imports this patch removes — verify before merging)
- return json.loads(image_string)
-
-
-def get_glance_image_detail_by_name(image_name):
- """
- :param image_name: name of the glance image to retrieve
- :return: json response from glance /images?name=image_name URL or None
- """
- logger.debug("--- get_glance_image_detail ---")
+    # First build the console-action request URL and body manually
- url = create_glance_url("/images?name=%s" % image_name)
- image_string = do_get(url)
- if image_string is None:
- logger.error('Error calling glance api, no data')
- return None
+ cloud = OpenStackCloud()
+ project_id = cloud.current_project_id
+ data = '{"os-getVNCConsole": {"type": "novnc"}}'
+ url = create_nova_url('/%s/servers/%s/action' % (project_id, server.id))
+ logger.debug("nova console: trying: " + str(url))
try:
- images_dict = json.loads(image_string)
-
- if 'images' not in images_dict:
- logger.error('Unexpected output from glance api')
- return None
-
- for image in images_dict['images']:
- if 'name' in image and image['name'] == image_name:
- logger.debug('returning image with id: %s' % image.get('id', '0'))
- return image
-
- except ValueError:
- logger.error('Could not parse json return from glance api')
+ project_auth_token = connection.authorize()
+ request = urllib2.Request(url)
+ request.add_header("Content-Type", "application/json")
+ request.add_header("charset", "UTF-8")
+ request.add_header("X-Auth-Token", project_auth_token)
+ request.get_method = lambda: 'POST'
+ result = urllib2.urlopen(request, data)
+ console_json_data = json.loads(result.read())
+ logger.debug(json.dumps(console_json_data, indent=2))
+ return console_json_data["console"]["url"]
+ except URLError as e:
+ logger.error("Could not get serial console to instance: %s" % instance_name)
+ logger.error("error was %s" % str(e))
return None
-def get_image_id_for_name(image_name):
+
+def create_nova_url(url):
"""
- Returns the glance Id for the given image_name
- :param image_name: name of image to search for
- :return: glance id or None on failure
+ Creates a nova url based on the service and endpoint in the sdk
"""
- logger.debug("--- get_image_id_for_name ---")
+ conn = create_connection()
- image_detail = get_glance_image_detail_by_name(image_name)
- if 'name' in image_detail and image_detail['name'] == image_name:
- # all is well, return the id from here
- return image_detail.get('id', None)
+ nova_id = conn.identity.find_service("nova").id
- return None
+    endpoint_query = {
+ "service_id": nova_id,
+ "interface": "public"
+ }
+ # This should only give one result
+ endpoint = conn.identity.endpoints(**endpoint_query)
-def get_stack_details(stack_name):
- """
- Returns python object representing Stack details
- :param stack_name: name of the stack to find
- :return: stack object or None if not found!
- """
- logger.debug("--- get_stack_details ---")
+ return endpoint[0].url + url
- url = create_heat_url("/%s/stacks" % _tenant_id)
- stacks_list_string = do_get(url)
- stacks_list = json.loads(stacks_list_string)
- for stack in stacks_list["stacks"]:
- if stack["stack_name"] == stack_name:
- return stack
- logger.info("stack name %s was not found!" % stack_name)
- return None
-
-def get_stack_resources(stack_name, stack_id):
- """
- Get all the resources for this Stack
- :param stack_name: name of stack
- :param stack_id: id of stack - use get_stack_details to retrieve this
- :return: json response from HEAT API
+def get_project_id(project_name):
"""
- logger.debug("--- get_stack_resources ---")
-
- url = create_heat_url("/%s/stacks/%s/%s/resources" % (_tenant_id, stack_name, stack_id))
- stack_resources_string = do_get(url)
- if stack_resources_string is None:
+ :param project_name: name of the project to search for
+ """
+
+ connection = create_connection()
+ cloud = OpenStackCloud()
+ logger.debug("--get project id")
+ return cloud.current_project_id
+ logger.debug("--- all projects--")
+ logger.debug(str(connection.__dict__))
+ logger.debug("--properties")
+ for project in connection.identity.projects(user_id=cloud.current_user_id):
+ logger.debug(str(project))
+ logger.debug("Find project")
+ result = connection.identity.find_project(project_name, user_id=cloud.current_user_id)
+ if result is None:
return None
else:
- return json.loads(stack_resources_string)
+ return result.to_dict()["id"]
-def delete_stack(stack_name):
+def get_consumed_management_ips():
"""
- Deletes a stack from OpenStack
- :param stack_name: name of the stack to be deleted
- :return: JSON response fro HEAT API
+ Return a list of dicts of the format
+ [
+ { "ip-address": "xxx.xxx.xxx.xxx"}
+ ]
+ This mimics the libvirt dnsmasq format for dhcp reservations
+ This is used in the wistarUtils.get_dhcp_reserved_ips() as a single place to
+ get all reserved management ips
+ :return: list of dicts
"""
- logger.debug("--- delete_stack ---")
+ ips = []
+ connection = create_connection()
- stack_details = get_stack_details(stack_name)
- if stack_details is None:
- return None
- else:
- stack_id = stack_details["id"]
- url = create_heat_url("/%s/stacks/%s/%s" % (_tenant_id, stack_name, stack_id))
- return do_delete(url)
+ mgmt_network = connection.network.find_network(configuration.openstack_mgmt_network)
+ if mgmt_network is None:
+ return ips
+ for port in connection.network.ports(network_id=mgmt_network.id):
+ for fixed_ip in port.fixed_ips:
+ fip = {}
+ logger.debug(fixed_ip)
+ fip["ip-address"] = fixed_ip["ip_address"]
+ ips.append(fip)
-def get_nova_flavors(project_name):
- """
- Returns flavors for a specific project from Nova in JSON encoded string
- :return: JSON encoded string
- """
- logger.debug("--- get_nova_flavors ---")
- project_id = get_project_id(project_name)
- url = create_nova_url("/" + project_id + '/flavors/detail')
- return do_get(url)
+ logger.debug(str(ips))
+ return ips
def get_minimum_flavor_for_specs(project_name, cpu, ram, disk):
@@ -562,96 +261,84 @@ def get_minimum_flavor_for_specs(project_name, cpu, ram, disk):
:return: flavor object {"name": "m1.xlarge"}
"""
- logger.debug("checking: " + str(cpu) + " " + str(ram) + " " + str(disk))
-
- # create an emergency flavor so we have something to return in case we can't connect to openstack
- # or some other issue prevents us from determining the right thing to do
emergency_flavor = dict()
- emergency_flavor['name'] = "m1.xlarge"
+ emergency_flavor['name'] = "m1.large"
- if not connect_to_openstack():
- return emergency_flavor
-
- flavors = get_nova_flavors(project_name)
- try:
- flavors_object = json.loads(flavors)
- except ValueError:
- logger.error('Could not parse nova return data')
- return emergency_flavor
+ connection = create_connection()
+ logger.debug("Trying to determine minumum flavor")
+ flavors = connection.compute.flavors()
+ flavors = [flavor.to_dict() for flavor in flavors]
cpu_candidates = list()
ram_candidates = list()
disk_candidates = list()
+ logger.debug("checking flavors")
- if "flavors" in flavors_object:
- logger.debug("checking flavors")
+ # first, let's see if we have an exact match!
+ for f in flavors:
+ logger.debug("checking flavor: " + f["name"])
+ if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk:
+ return f
- # first, let's see if we have an exact match!
- for f in flavors_object["flavors"]:
- logger.debug("checking flavor: " + f["name"])
- if f["vcpus"] == cpu and f["ram"] == ram and f["disk"] == disk:
- return f
-
- logger.debug("not exact match yet")
- # we don't have an exact match yet!
- for f in flavors_object["flavors"]:
- logger.debug(str(f["vcpus"]) + " " + str(cpu))
- if "vcpus" in f and f["vcpus"] >= int(cpu):
- cpu_candidates.append(f)
+ logger.debug("not exact match yet")
+ # we don't have an exact match yet!
+ for f in flavors:
+ logger.debug(str(f["vcpus"]) + " " + str(cpu))
+ if "vcpus" in f and f["vcpus"] >= int(cpu):
+ cpu_candidates.append(f)
- logger.debug("got cpu candidates: " + str(len(cpu_candidates)))
+ logger.debug("got cpu candidates: " + str(len(cpu_candidates)))
- for f in cpu_candidates:
- if "ram" in f and f["ram"] >= ram:
- ram_candidates.append(f)
+ for f in cpu_candidates:
+ if "ram" in f and f["ram"] >= ram:
+ ram_candidates.append(f)
- logger.debug("got ram candidates: " + str(len(ram_candidates)))
+ logger.debug("got ram candidates: " + str(len(ram_candidates)))
- for f in ram_candidates:
- if "disk" in f and f["disk"] >= disk:
- disk_candidates.append(f)
+ for f in ram_candidates:
+ if "disk" in f and f["disk"] >= disk:
+ disk_candidates.append(f)
- logger.debug("got disk candidates: " + str(len(disk_candidates)))
+ logger.debug("got disk candidates: " + str(len(disk_candidates)))
- if len(disk_candidates) == 0:
- # uh-oh, just return the largest and hope for the best!
- return emergency_flavor
- elif len(disk_candidates) == 1:
- return disk_candidates[0]
- else:
- # we have more than one candidate left
- # let's find the smallest flavor left!
- cpu_low = 99
- disk_low = 999
- ram_low = 99999
- for f in disk_candidates:
- if f["vcpus"] < cpu_low:
- cpu_low = f["vcpus"]
- if f["ram"] < ram_low:
- ram_low = f["ram"]
- if f["disk"] < disk_low:
- disk_low = f["disk"]
-
- for f in disk_candidates:
- if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low:
- # found the lowest available
- logger.debug("return lowest across all axis")
- return f
- for f in disk_candidates:
- if f["vcpus"] == cpu_low and f["ram"] == ram_low:
- # lowest available along ram and cpu axis
- logger.debug("return lowest across cpu and ram")
- return f
- for f in disk_candidates:
- if f["vcpus"] == cpu:
- logger.debug("return lowest cpu only")
- logger.debug(f)
- return f
-
- # should not arrive here :-/
- logger.debug("got to the impossible")
- return disk_candidates[0]
+ if len(disk_candidates) == 0:
+ # uh-oh, just return the largest and hope for the best!
+ return emergency_flavor
+ elif len(disk_candidates) == 1:
+ return disk_candidates[0]
+ else:
+ # we have more than one candidate left
+ # let's find the smallest flavor left!
+ cpu_low = 99
+ disk_low = 999
+ ram_low = 99999
+ for f in disk_candidates:
+ if f["vcpus"] < cpu_low:
+ cpu_low = f["vcpus"]
+ if f["ram"] < ram_low:
+ ram_low = f["ram"]
+ if f["disk"] < disk_low:
+ disk_low = f["disk"]
+
+ for f in disk_candidates:
+ if f["vcpus"] == cpu_low and f["ram"] == ram_low and f["disk"] == disk_low:
+ # found the lowest available
+ logger.debug("return lowest across all axis")
+ return f
+ for f in disk_candidates:
+ if f["vcpus"] == cpu_low and f["ram"] == ram_low:
+ # lowest available along ram and cpu axis
+ logger.debug("return lowest across cpu and ram")
+ return f
+ for f in disk_candidates:
+ if f["vcpus"] == cpu:
+ logger.debug("return lowest cpu only")
+ logger.debug(f)
+ return f
+ # should not arrive here :-/
+ logger.debug("got to the impossible")
+ return disk_candidates[0]
def create_stack(stack_name, template_string):
"""
@@ -660,232 +347,114 @@ def create_stack(stack_name, template_string):
:param template_string: HEAT template to be used
:return: JSON response from HEAT-API or None on failure
"""
- logger.debug("--- create_stack ---")
- url = create_heat_url("/" + str(_tenant_id) + "/stacks")
- data = '''{
- "disable_rollback": true,
- "parameters": {},
- "stack_name": "%s",
- "template": %s
- }''' % (stack_name, template_string)
- logger.debug("Creating stack with data:")
- logger.debug(data)
- return do_post(url, data)
+ connection = create_connection()
+ template = json.loads(template_string)
-def get_nova_serial_console(instance_name):
- """
- Get the websocket URL for the serial proxy for a given nova server (instance)
- :param instance_name: name of the instance
- :return: websocket url ws://x.x.x.x:xxxx/token=xxxxx
- """
- logger.debug("--- get_nova_serial_console ---")
+ heat_data = {}
+ heat_data["name"] = stack_name
+ heat_data["template"] = template
- logger.debug("Looking for instance: %s" % instance_name)
- server_detail_url = create_nova_url('/%s/servers?name=%s' % (_tenant_id, instance_name))
- server_detail = do_nova_get(server_detail_url)
+ # result = connection.orchestration.create_stack({"name"})
- # logger.debug("got details: %s" % server_detail)
+ result = connection.orchestration.create_stack(preview=False, **heat_data)
+ logger.debug(result)
+ return result
- if server_detail is None:
- return None
+def update_stack(stack_name, template_string):
+ """
+ Updates the heat template associated with a stack
+ This triggers a rebuild of the associated resources so may break certain topologies
+ """
- json_data = json.loads(server_detail)
- if len(json_data["servers"]) == 0:
- return None
+ connection = create_connection()
- server_uuid = ""
- for s in json_data["servers"]:
- if s["name"] == instance_name:
- server_uuid = s["id"]
- break
+ template = json.loads(template_string)
- if server_uuid == "":
- logger.error("Console not found with server name %s" % instance_name)
- return None
+ stack = connection.orchestration.find_stack(stack_name)
- # logger.debug(server_uuid)
- data = '{"os-getSerialConsole": {"type": "serial"}}'
- url = create_nova_url('/%s/servers/%s/action' % (_tenant_id, server_uuid))
- try:
- project_auth_token = get_project_auth_token(configuration.openstack_project)
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", project_auth_token)
- request.get_method = lambda: 'POST'
- result = urllib2.urlopen(request, data)
- console_json_data = json.loads(result.read())
- logger.debug(json.dumps(console_json_data, indent=2))
- return console_json_data["console"]["url"]
- except URLError as e:
- logger.error("Could not get serial console to instance: %s" % instance_name)
- logger.error("error was %s" % str(e))
+ if stack is None:
+ # Stack has been deleted or never deployed!
return None
+ else:
+ heat_data = {}
+ heat_data["template"] = template
+ result = connection.orchestration.update_stack(stack, **heat_data)
+ logger.debug(result)
-# URL Utility functions
-def create_glance_url(url):
- return "http://" + configuration.openstack_host + _glance_url + url
-
-
-def create_neutron_url(url):
- return "http://" + configuration.openstack_host + _neutron_url + url
-
-
-def create_os_url(url):
- return "http://" + configuration.openstack_host + _os_url + url
-
-
-def create_heat_url(url):
- return "http://" + configuration.openstack_host + _heat_url + url
-
-
-def create_nova_url(url):
- return "http://" + configuration.openstack_host + _nova_url + url
+ return result
-# Utility REST functions below
-def do_get(url):
+def delete_stack(stack_name):
"""
- Performs a simple REST GET
- :param url: full URL for GET request
- :return: response from urllib2.urlopen(r).read() or None
+ Deletes a stack from OpenStack
+ :param stack_name: name of the stack to be deleted
+ :return: JSON response from HEAT API
"""
- try:
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", _auth_token)
- request.get_method = lambda: 'GET'
- result = urllib2.urlopen(request)
- return result.read()
- except Exception as e:
- logger.error("Could not perform GET to url: %s" % url)
- logger.error("error was %s" % str(e))
- return None
-
+ connection = create_connection()
-def do_post(url, data):
- """
- Performs a simple REST POST
- :param url: full url to use for POST
- :param data: url encoded data
- :return: string response from urllib2.urlopen(r,data).read() or None
- """
- try:
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("Content-Length", len(data))
- request.add_header("X-Auth-Token", _auth_token)
- result = urllib2.urlopen(request, data)
- return result.read()
- except URLError as e:
- logger.error("Could not perform POST to url: %s" % url)
- logger.error("error was %s" % str(e))
+ stack_details = get_stack_details(stack_name)
+ if stack_details is None:
return None
+ else:
+ connection.orchestration.delete_stack(stack_details["id"])
-
-def do_put(url, data=""):
+def get_stack_details(stack_name):
"""
- Performs a simple REST PUT
- :param url: full URL to use for PUT
- :param data: url encoded data
- :return: string response from urllib2.urlopen(r, data).read() or None
+ Returns python object representing Stack details
+ :param stack_name: name of the stack to find
+ :return: stack object or None if not found!
"""
- try:
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", _auth_token)
- request.get_method = lambda: 'PUT'
+ logger.debug("--- get_stack_details ---")
- if data == "":
- result = urllib2.urlopen(request)
- else:
- result = urllib2.urlopen(request, data)
+ connection = create_connection()
- return result.read()
- except URLError as e:
- logger.error("Could not perform PUT to url: %s" % url)
- logger.error("error was %s" % str(e))
+ result = connection.orchestration.find_stack(stack_name)
+ if result is None:
+ logger.debug("stack doesn't exist yet")
return None
+ else:
+ return result.to_dict()
-def do_nova_get(url):
+def get_stack_resources(stack_name, stack_id, resource_status=False):
"""
- Performs a simple REST GET
- :param url: full URL for GET request
- :return: response from urllib2.urlopen(r).read() or None
+ Get all the resources for this Stack
+ :param stack_name: name of stack
+ :param stack_id: id of stack - use get_stack_details to retrieve this
+ :param resource_status: Also get the physical_status of the OS::Nova::Server instances
+ :return: json response from HEAT API
"""
- try:
- project_auth_token = get_project_auth_token(configuration.openstack_project)
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", project_auth_token)
- request.get_method = lambda: 'GET'
- result = urllib2.urlopen(request)
- return result.read()
- except Exception as e:
- logger.error("Could not perform GET to url: %s" % url)
- logger.error("error was %s" % str(e))
- return None
+ conn = create_connection()
-def do_nova_delete(url, project_name, data=""):
- """
- Performs a DELETE request with the specified project auth token
- :param url: full url to use for DELETE
- :param project_name: name of the project
- :param data: (optional) url encoded data
- :return: string response from urllib2.urlopen(r, data).read() or None
- """
- logger.debug("--- connect_to_openstack ---")
- try:
- project_token = get_project_auth_token(project_name)
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", project_token)
- request.get_method = lambda: 'DELETE'
+ stack = conn.orchestration.get_stack(stack_id)
- if data == "":
- result = urllib2.urlopen(request)
- else:
- result = urllib2.urlopen(request, data)
+ resources = conn.orchestration.resources(stack)
+ logger.debug("Got resources")
+ resources_list = [r.to_dict() for r in resources]
+ logger.debug(resources_list)
+ if resource_status == False:
+ return {"resources": resources_list}
- return result.read()
- except URLError as e:
- logger.error("Could not perform DELETE to url: %s" % url)
- logger.error("error was %s" % str(e))
- return None
+ #Get status of the resources as well
+ for resource in resources_list:
+ # Only get the status for the OS::Nova::Server
+ if resource["resource_type"] == "OS::Nova::Server" and "COMPLETE" in resource["status"]:
-def do_delete(url, data=""):
- """
- Performs a simple REST DELETE call
- :param url: full url to use for Delete
- :param data: (optional) url encoded data
- :return: string response from urllib2.urlopen(r, data).read() or None
- """
- try:
- request = urllib2.Request(url)
- request.add_header("Content-Type", "application/json")
- request.add_header("charset", "UTF-8")
- request.add_header("X-Auth-Token", _auth_token)
- request.get_method = lambda: 'DELETE'
+ status = conn.compute.get_server(resource["physical_resource_id"]).status
- if data == "":
- result = urllib2.urlopen(request)
+ # Add the key for the status of the physical status
+ resource["physical_status"] = status
else:
- result = urllib2.urlopen(request, data)
+ # Either it's not Nova or not yet completed
+ resource["physical_status"] = None
+
+ logger.debug("Also gotten the status")
+ logger.debug(resources_list)
- return result.read()
- except URLError as e:
- logger.error("Could not perform DELETE to url: %s" % url)
- logger.error("error was %s" % str(e))
- return None
+ return {"resources": resources_list}
diff --git a/common/static/js/wistar_utils.js b/common/static/js/wistar_utils.js
index 75a1116..9384ec1 100644
--- a/common/static/js/wistar_utils.js
+++ b/common/static/js/wistar_utils.js
@@ -61,6 +61,44 @@
alert('Could not perform request!');
});
}
+
+ // Similar to the manageDomain function, except for OpenStack instances
+ // Does not immediately change the page, unlike the domain one
+ // Only a refresh of the status will show if an instance has been turned off
+ function manageInstance(action, instanceId, topoId) {
+ var doc = jQuery(document.documentElement);
+ doc.css('cursor', 'progress');
+
+ var doc = jQuery(document.documentElement);
+ doc.css('cursor', 'progress');
+
+ if (action == "stop") {
+ if (typeof s != 'undefined') {
+ s.setBootState("down");
+ }
+
+ if (! confirm("This will power off the instance ungracefully!")) {
+ doc.css('cursor', '');
+ return false;
+ }
+ }
+ var url = '/ajax/manageInstance/';
+ var params = {
+ 'topologyId' : topoId,
+ 'instanceId' : instanceId,
+ 'action' : action
+ };
+ var post = jQuery.post(url, params, function(response) {
+ var content = jQuery(response);
+ jQuery('#deploymentStatus').empty().append(content);
+ });
+ post.fail(function() {
+ alert('Could not perform request!');
+ });
+ post.always(function() {
+ doc.css('cursor', '');
+ });
+ }
function manageDomain(action, domainId, topoId) {
var doc = jQuery(document.documentElement);
diff --git a/extras/install_wistar_ubuntu_16_pb.yml b/extras/install_wistar_ubuntu_16_pb.yml
index 0b44e41..2c7c95a 100644
--- a/extras/install_wistar_ubuntu_16_pb.yml
+++ b/extras/install_wistar_ubuntu_16_pb.yml
@@ -70,6 +70,7 @@
- mtools
- dosfstools
- openvswitch-switch
+ - python-openstacksdk
- name: Install Django
pip:
diff --git a/extras/install_wistar_ubuntu_18_pb.yml b/extras/install_wistar_ubuntu_18_pb.yml
new file mode 100644
index 0000000..36a0db4
--- /dev/null
+++ b/extras/install_wistar_ubuntu_18_pb.yml
@@ -0,0 +1,189 @@
+---
+#
+# Provisions all the required dependencies for Wistar on the local host
+# wistar_branch refers to the branch from git that you want to clone
+# this is useful if you'd like to follow the develop branch for example
+#
+
+- name: Provision Wistar
+ hosts: localhost
+ connection: local
+ become: true
+ vars:
+ wistar_branch: master
+
+ tasks:
+ - name: Update all packages to the latest version
+ apt:
+ upgrade: dist
+ update_cache: yes
+
+ - name: Install Junos-eznc dependencies
+ apt:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - build-essential
+ - libxml2-dev
+ - libxslt1-dev
+ - libz-dev
+ - libffi-dev
+ - libssl-dev
+ - python-dev
+ - git
+ - python-pip
+
+ - name: Install python-cryptography
+ pip:
+ name: cryptography
+ editable: false
+
+ - name: Install junos-eznc
+ pip:
+ name: junos-eznc
+ editable: false
+
+ - name: Install jxmlease
+ pip:
+ name: jxmlease
+ editable: false
+
+ - name: Install Wistar dependencies
+ apt:
+ name: "{{ item }}"
+ state: present
+ update_cache: true
+ with_items:
+ - qemu-kvm
+ - libvirt-bin
+ - socat
+ - python-pexpect
+ - python-libvirt
+ - python-yaml
+ - unzip
+ - bridge-utils
+ - python-numpy
+ - genisoimage
+ - python-netaddr
+ - python-markupsafe
+ - python-setuptools
+ - mtools
+ - dosfstools
+ - openvswitch-switch
+ - python-openstacksdk
+
+ - name: Install Django
+ pip:
+ name: django
+ version: 1.9.9
+ editable: false
+
+ - name: Install Python virtualBox
+ pip:
+ name: pyvbox
+ editable: false
+
+ - name: Create Wistar directory structure 1
+ file:
+ path: /opt/wistar
+ state: directory
+ - name: Create Wistar directory structure 2
+ file:
+ path: /opt/wistar/user_images
+ state: directory
+ - name: Create Wistar directory structure 3
+ file:
+ path: /opt/wistar/wistar-master
+ state: directory
+ - name: Create Wistar directory structure 4
+ file:
+ path: /opt/wistar/media
+ state: directory
+ - name: Create Wistar directory structure 5
+ file:
+ path: /opt/wistar/seeds
+ state: directory
+ - name: Create Wistar directory structure 6
+ file:
+ path: /opt/wistar/user_images/instances
+ state: directory
+
+ - name: Pull latest Wistar from Git
+ git:
+ repo: https://github.com/Juniper/wistar.git
+ dest: /opt/wistar/wistar-master/
+ version: "{{ wistar_branch }}"
+
+ - name: Create Wistar tables
+ command: /opt/wistar/wistar-master/manage.py migrate
+
+ - name: install apache2
+ apt:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - apache2
+ - libapache2-mod-wsgi
+
+ - name: enable the Apache2 module "wsgi"
+ apache2_module:
+ state: present
+ name: wsgi
+ notify: restart apache
+
+ - name: set permissions on wistar dir
+ file:
+ path: /opt/wistar
+ owner: www-data
+ group: www-data
+ state: directory
+ recurse: yes
+
+ - name: set permissions on wistar log
+ file:
+ path: /var/log/wistar.log
+ owner: www-data
+ group: www-data
+ state: touch
+
+ - name: set permissions on wistar errorlog
+ file:
+ path: /var/log/apache2/wistar.log
+ owner: www-data
+ group: www-data
+ state: touch
+
+ - name: set permissions on wistar accesslog
+ file:
+ path: /var/log/apache2/wistar_access.log
+ owner: www-data
+ group: www-data
+ state: touch
+
+ - name: copy wistar config file to apache
+ copy:
+ src: 999-wistar.conf
+ dest: /etc/apache2/sites-available/999-wistar.conf
+
+ - name: enable wistar site in apache
+ file:
+ src: /etc/apache2/sites-available/999-wistar.conf
+ dest: /etc/apache2/sites-enabled/999-wistar.conf
+ state: link
+ notify: restart apache
+
+ - name: add www-data to libvirt users
+ user:
+ name: www-data
+ groups: libvirt
+ append: yes
+
+ - name: Allow libvirtd group to modify ovs-vsctl
+ lineinfile:
+ dest: /etc/sudoers
+ state: present
+ line: '%libvirt ALL=NOPASSWD: /usr/bin/ovs-vsctl'
+
+ handlers:
+ - name: restart apache
+ service: name=apache2 state=restarted
diff --git a/wistar/configuration.py b/wistar/configuration.py
index 592e538..18004cc 100644
--- a/wistar/configuration.py
+++ b/wistar/configuration.py
@@ -66,6 +66,15 @@
# some version of openstack use '/dashboard', '/horizon', or '/'
openstack_horizon_url = "http://10.10.10.10"
+# Selects the method to authenticate against openstack
+# can be "manual" or "auto"
+# Manual uses the configuration data in this configuration file
+# Auto uses the openstacksdk to search for authentication details in clouds.yaml files
+# On "auto", it uses the openstack_cloud variable to select to cloud in the clouds.yaml
+# https://docs.openstack.org/openstacksdk/latest/user/guides/connect_from_config.html
+openstack_mode = "auto"
+openstack_cloud = "openstack"
+
# authentication parameters
openstack_host = '10.10.10.10'
openstack_user = 'admin'
@@ -73,6 +82,8 @@
# project under which to place all topologies/stacks
openstack_project = 'admin'
+# The region for the cloud
+openstack_region = "RegionOne"
openstack_mgmt_network = 'wistar_mgmt'
openstack_external_network = 'public-br-eth0'
|