diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..8c756e76 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,11 @@ +dist: xenial +sudo: false +language: python +python: + - "3.5" +install: + - sudo apt-get install -y tox + +script: + - tox -e py35 -- test.test_hpe_plugin_v2 + - tox -e pep8 diff --git a/Dockerfile b/Dockerfile index e8c86966..507c3c38 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:edge +FROM alpine:3.8 ENV DEBIAN_FRONTEND=noninteractive ENV PYTHONPATH=${HOME}/python-hpedockerplugin:/root/python-hpedockerplugin @@ -20,6 +20,7 @@ RUN apk add --no-cache --update \ sg3_utils\ eudev \ libssl1.0 \ + nfs-utils \ sudo \ && apk update \ && apk upgrade \ @@ -67,6 +68,13 @@ RUN mkdir -p /opt/hpe/data RUN chmod u+x /usr/bin/iscsiadm RUN chmod u+x /usr/bin/cleanup.sh +# Patch the os_brick, twisted modules + +COPY ./patch_os_bricks/linuxscsi.py /usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/initiator/linuxscsi.py +COPY ./patch_os_bricks/rootwrap.py /usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/privileged/rootwrap.py +COPY ./oslo/comm.py /usr/lib/python3.6/site-packages/oslo.privsep-1.29.0-py3.6.egg/oslo_privsep/comm.py +COPY ./patch_os_bricks/compat.py /usr/lib/python3.6/site-packages/Twisted-18.7.0rc1-py3.6-linux-x86_64.egg/twisted/python/compat.py + WORKDIR /python-hpedockerplugin ENTRYPOINT ["/bin/sh", "-c", "./plugin-start"] @@ -83,4 +91,3 @@ RUN sed -i \ ENV TAG $TAG ENV GIT_SHA $GIT_SHA ENV BUILD_DATE $BUILD_DATE - diff --git a/__init__.py b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/buildDockerPlugin.sh b/buildDockerPlugin.sh index 149716b2..111f28a2 100755 --- a/buildDockerPlugin.sh +++ b/buildDockerPlugin.sh @@ -92,18 +92,10 @@ rc=$? 
exit $rc fi -# Patch the os-bricks code - -sudo rm ./v2plugin/rootfs/usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/initiator/linuxscsi.pyc -sudo rm ./v2plugin/rootfs/usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/privileged/rootwrap.pyc -sudo cp ./patch_os_bricks/linuxscsi.py ./v2plugin/rootfs/usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/initiator/linuxscsi.py -sudo cp ./patch_os_bricks/rootwrap.py ./v2plugin/rootfs/usr/lib/python3.6/site-packages/os_brick-1.13.1-py3.6.egg/os_brick/privileged/rootwrap.py -sudo cp ./patch_os_bricks/compat.py ./v2plugin/rootfs/usr/lib/python3.6/site-packages/Twisted-18.7.0rc1-py3.6-linux-x86_64.egg/twisted/python/compat.py -sudo cp ./oslo/comm.py ./v2plugin/rootfs/usr/lib/python3.6/site-packages/oslo.privsep-1.29.0-py3.6.egg/oslo_privsep/comm.py - - -# end of patch for os-bricks # minor modification to remove the .git folder from getting packaged # into v2plugin folder rm -rf ./v2plugin/rootfs/python-hpedockerplugin/.git +rm -rf ./v2plugin/rootfs/python-hpedockerplugin/.tox docker plugin create ${pluginName} v2plugin +rm -rf v2plugin +rm -f unit_tests_run.log diff --git a/config/create_help.txt b/config/create_help.txt index cc1c3789..84355811 100644 --- a/config/create_help.txt +++ b/config/create_help.txt @@ -1,68 +1,111 @@ -HPE 3PAR volume plug-in for Docker: Create help -Create a volume in HPE 3PAR or create a clone of a docker volume or create a snapshot of a docker volume using HPE 3PAR volume plug-in for Docker. -Default Options: --o mountConflictDelay=x x is the number of seconds to delay a mount request when there is a conflict(default is 30) --o size=x x is a size of a docker volume to be created, default value of x is 100 (in GiB) --o provisioning=x x is a provision type of a volume to be created, valid values are thin,dedup,full. Default value is thin. 
+=============================================== +HPE 3PAR/PRIMERA Volume Plug-in For Docker: Create Help +=============================================== +Create a volume in HPE 3PAR/PRIMERA or create a clone of a Docker volume or create a snapshot of a Docker volume using HPE 3PAR/PRIMERA volume plug-in for Docker. + +--------------------------------- Create Volume Options: - -o cpg=x x is the Usr CPG used for provisioning the volume +--------------------------------- + -o cpg=x x is the User CPG used for provisioning the volume -o snapcpg=x x is the Snap CPG used for provisioning the snapshots of the volume - -o size=x x is a size of a docker volume to be created, deault value of x is 100 (in GiB) - -o provisioning=x x is a provision type of a volume to be created, valid values are thin,dedup,full. Default value is thin. - -o compression=x x is a boolean value, hence x can have true or false. To create a compressed volume, minimum size of a - volume should to be 16 GiB. It also requires 3PAR OS version 3.3.1 or more and underlying disks should be SSD. - -o flash-cache=x x is a boolean value, hence x can have true or false. x specifies whether flash cache should be used or not. - Valid vaues are true or false. - -o qos-name=x x is name of existing VVset on 3PAR on which QoS rules are applied. - -o fsOwner=x x is the user id and group id that should own the root directory of the filesystem, in the form of + -o size=x x is the size of a Docker volume to be created. Default value is 100 (in GiB) + -o provisioning=x x is the provision type of a volume to be created. Valid values are thin, dedup, full with thin as default. + PRIMERA array does not support full provisioning. + -o compression=x x is a boolean with true and false as valid values. To create a compressed volume, minimum size of a + volume should be 16 GiB. It also requires OS version 3.3.1 or greater for 3PAR and OS version 4.0.0 or greater for PRIMERA. + Also, underlying disks should be SSD. 
Default value is false. + -o flash-cache=x x is a boolean with true and false as valid values. x specifies whether flash cache should be used or not. + Default value is false. + -o qos-name=x x is name of an existing 3PAR/PRIMERA vv-set on which QoS rules are set. + -o fsOwner=x x is the user id and group id that should own the root directory of the filesystem in the form of [userId:groupId] - -o fsMode=x x is 1 to 4 octal digits that represent the file mode to be applied to the root directory of the + -o fsMode=x x is 1 to 4 octal digits that represent the file mode to be applied to the root directory of the filesystem - -o replicationGroup=x x is name of the 3PAR replication group to which the newly created volume is added. If the replication - group doesn't exist on 3PAR array then it is created. Configuration parameter, 'replication_device', + -o backend=x x is the name of the backend identified by square brackets in hpe.conf, and the volume creation happens on this identified + backend. Default value of this option is DEFAULT when not specified. This can be used in combination with other volume + create options along with -o importVol. + Backend represents a group of configuration parameters for a particular array + Documentation: https://github.com/hpe-storage/python-hpedockerplugin/blob/plugin_v2/docs/multi-array-feature.md +-o mountConflictDelay=x x is the number of seconds to delay a mount request when there is a conflict. Default value is 30 seconds. + + +--------------------------------- +Create Replicated Volume Options: +--------------------------------- + -o size=x x is the size of a Docker volume to be created. Default value is 100 (in GiB) + -o provisioning=x x is the provision type of a volume to be created. Valid values are thin, dedup, full with thin as default. + PRIMERA array does not support full provisioning. + -o compression=x x is a boolean with true and false as valid values. 
To create a compressed volume, minimum size of a + volume should be 16 GiB. It also requires OS version 3.3.1 or greater for 3PAR and OS version 4.0.0 or greater for PRIMERA. + Also, underlying disks should be SSD. Default value is false. + -o backend=x x is the name of the backend identified by square brackets in hpe.conf, and the volume creation happens on this identified + backend. Default value of this option is DEFAULT when not specified. This can be used in combination with other volume + create options along with -o importVol + Backend represents a group of configuration parameters for a particular 3PAR Array + Documentation: https://github.com/hpe-storage/python-hpedockerplugin/blob/plugin_v2/docs/multi-array-feature.md + -o mountConflictDelay=x x is the number of seconds to delay a mount request when there is a conflict. Default value is 30 seconds. + -o replicationGroup=x x is name of the 3PAR/PRIMERA replication group to which the newly created volume is added. If the replication + group doesn't exist on 3PAR/PRIMERA array then it is created. Configuration parameter, 'replication_device', must be defined in the hpe.conf file in conjunction with this option. Not doing so results in rejection of the volume creation request. Another configuration parameter, 'quorum_witness_ip', if defined, results into Peer Persistence based replication configuration. Absence of 'quorum_witness_ip' results into - Active/Passive based replication configuration. - + Active/Passive based replication configuration. PRIMERA does not support Async mode of replication +--------------------------------- Create Clone Options: - -o cloneOf=x x is the name of docker volume (source volume) of which clone to be created. - -o size=x x is the size of cloned volume. x should be greater than or equal to size of a source volume. 
- -o cpg=x x is the Usr CPG used for provisioning the volume +--------------------------------- + -o cloneOf=x x is the name of the source Docker volume of which the clone is to be created. + -o size=x x is the size of cloned volume. It should be greater than or equal to the size of the source volume. + -o cpg=x x is the User CPG used for provisioning the volume -o snapcpg=x x is the Snap CPG used for provisioning the snapshots of the volume + -o mountConflictDelay=x This option is ignored for this operation. It is present so that it can work with orchestrators like K8S and Openshift. + - +--------------------------------- Create Snapshot Options: - -o virtualCopyOf=x x is the name of a docker volume for which snapshot/virtual copy is to be created. - -o retentionHours=x x is the number of hours a snapshot will be retained. Snapshot will be retained for x hours from the time of creation. - Snapshot can not be deleted during retention period. - -o expirationHours=x x is the number of hours after which snapshot will be removed from 3PAR. If both retentionHours and expirationHours - are used then expirationHours must be greater than or equal to retentionHours. +--------------------------------- + -o virtualCopyOf=x x is the name of the source Docker volume whose snapshot/virtual copy is to be created. + -o retentionHours=x x is the number of hours the snapshot will be retained. Retention time begins from the time of snapshot creation. + During this time the snapshot cannot be removed. + -o expirationHours=x x is the number of hours after which snapshot is removed from 3PAR/PRIMERA. If both retentionHours and expirationHours + are specified then expirationHours must be greater than or equal to retentionHours. + -o mountConflictDelay=x This option is ignored for this operation. It is present so that it can work with orchestrators like K8S and Openshift. + -o size=x This option is ignored for this operation. 
It is present so that it can work with orchestrators like K8S and Openshift. + +--------------------------------- Import Volume Options: - -o importVol=x x is the name of 3PAR volume or snapshot which needs to be imported. Volume or snapshot which needs to be imported - should not be attached to any of the host. +--------------------------------- + -o importVol=x x is the name of 3PAR/PRIMERA volume or snapshot that needs to be imported. As a prerequisite, the volume or snapshot being imported + must not be in attached/mounted state. + -o backend=x x is the name of the backend identified by square brackets in hpe.conf, and the volume creation happens on this identified + backend. Default value of this option is DEFAULT when not specified. This can be used in combination with other volume + create options along with -o importVol + Backend represents a group of configuration parameters for a particular Array + Documentation: https://github.com/hpe-storage/python-hpedockerplugin/blob/plugin_v2/docs/multi-array-feature.md + -o mountConflictDelay=x This option is ignored for this operation. It is present so that it can work with orchestrators like K8S and Openshift. +--------------------------------- Create Snapshot Schedule: +--------------------------------- -o virtualCopyOf=x This option is mandatory. x is the name of the volume for which snapshot schedule has to be created. - -o scheduleFrequency=x This option is mandatory. x is the string that indicates the snapshot schedule frequency. - This string will contain 5 values which are seperated by space. + -o scheduleFrequency=x This option is mandatory. x is the string which indicates the snapshot schedule frequency. + This string will contain 5 values which are separated by space. Example x can be replaced with "5 * * * *" First field in the string is an Integer and represents the number of minutes that should be passed the scheduled - clock hour to exucute the scheduled task. 
- Second field in the string is an Integer and represents hour at which task needs to be executed. - User can specify a valid range ex:2-5. + clock hour to execute the scheduled task. + Second field in the string is an integer and represents hour at which task needs to be executed. + User can specify a valid range like 2-5. Third field in the string is an Integer and represents day of the month on which scheduled task has to be executed. - User can specify a valid range ex:12-15. + User can specify a valid range like 12-15. Fourth field in the string indicates month in which the task needs to be executed. - User can specify a valid range ex:3-5. + User can specify a valid range like 3-5. Fifth field in the string indicates day of a week on which task should be executed. - User can specify a valid range ex:0-4. + User can specify a valid range like 0-4. x has to be specified in double quotes. Valid values for these fields are: Field Allowed Values ----- -------------- @@ -71,13 +114,35 @@ Create Snapshot Schedule: day-of-month * or 1-31 month * or 1-12 day-of-week * or 0-6 (0 is Sunday) --o scheduleName=x This option is mandatory. x is a string which indicates name for the schedule on 3PAR. --o retentionHours=x This option is not mandatory option. x is an integer, indicates number of hours this snapshot will be retained. --o snaphotPrefix=x This option is mandatory. x is prefix string for the scheduled snapshots which will get created on 3PAR. - We recommend to use 3 letter string. If prefix is abc then name of the snapshot which gets created on the 3PAR + + --------- + Examples: + --------- + 1. To create snapshot at midnight and at noon, specify x as "0 0,12 * * *" + 2. To create snapshot on 5th, 15th and 25th of month, specify x as "0 * 5,15,25 * *" + 3. To create snapshot every quarter, specify x as "0 * * 3,6,9,12 *" + 4. To create snapshot on Monday, Wednesday and Friday, specify x as "0 * * * 1,3,5" +-o scheduleName=x This option is mandatory. 
x is a string which indicates name for the schedule on 3PAR/PRIMERA. + Note: When this parameter is passed with string 'auto', then the scheduleName is + auto generated with a timestamp. This is to support Kubernetes environment where + this parameter can be used as a storage class option. +-o snapshotPrefix=x This option is mandatory. x is prefix string for the scheduled snapshots which will get created on 3PAR/PRIMERA. + It is recommended to use 3 letter string. If prefix is abc then name of the snapshot which gets created on the 3PAR/PRIMERA will be in the format abc.@y@@m@@d@@H@@M@@S@ --o expHrs=x This option is not mandatory option. x is an integer, indicates number of hours after which snapshot created via - snapshot schedule will be deleted from 3PAR. --o retHrs=x This option is not mandetory option. x is an integer, indicates number of hours for which snapshot created via +-o expHrs=x This option is not mandatory. x is an integer which indicates number of hours after which snapshot created via + snapshot schedule will be deleted from 3PAR/PRIMERA. +-o retHrs=x This option is not mandatory. x is an integer which indicates number of hours for which snapshot created via snapshot schedule will be retained. +-o mountConflictDelay=x This option is ignored for this operation. It is present so that it can work with orchestrators like K8S and Openshift. +-o size=x This option is ignored for this operation. It is present so that it can work with orchestrators like K8S and Openshift. 
+ + +--------------------------------- +Display available backends: +--------------------------------- + -o help=backends This option displays list of available backends along with their status +---------------------------------- +Display File Persona related help: +---------------------------------- + -o help -o filePersona These options when used together display help on File Persona \ No newline at end of file diff --git a/config/create_share_help.txt b/config/create_share_help.txt new file mode 100644 index 00000000..355b4cd6 --- /dev/null +++ b/config/create_share_help.txt @@ -0,0 +1,47 @@ + + +=============================================== +HPE 3PAR Share Plug-in For Docker: Create Help +=============================================== +Create a share in HPE 3PAR using HPE 3PAR volume plug-in for Docker. + +-o filePersona Presence of this flag allows the File Persona driver to process the request +-o cpg=x x specifies the cpg to be used for the share. This parameter can be used with or without + ‘fpg’ option. When used with ‘fpg’, the FPG is created with the specified name if it + does not exist. If it does exist, then share is created under it. + When used without ‘fpg’ option, default FPG under the specified CPG is selected for share + creation. If default FPG does not exist, a new default FPG is created under which the + share is created. +-o fpg=x x is the name of the file provisioning group (FPG). This option must be specified when user wants + to use a non-default FPG or a legacy FPG. The FPG may or may not be an existing one. + For a non-existing FPG x, a new FPG is created using the CPG that is either explicitly + specified with '-o cpg' option or configured in hpe.conf. + If FPG exists, be it a legacy FPG or Docker managed FPG, share is simply created under it. + In case this option is not specified, then a default FPG is created with size 16TiB if it + doesn't exist. 
Naming convention for default FPG is DockerFpg_n where n is an integer + starting from 0. +-o size=x x is the size of the share in GiB. By default, it is 1024 GiB. +-o help -o filePersona When used together, these options display this help content +-o help=backends -o filePersona When used together, these options display status of the backends configured for File Persona +-o fsOwner=x x is the user id and group id that should own the root directory of nfs file share in the form of + [userId:groupId]. Administrator also need to make sure that local user and local group with these + ids are present on 3PAR before trying to mount the created share. + For such shares which has userId and groupId specified, mount will succeed only if users and + group with specified ids are present on 3PAR. +-o fsMode=x x is 1 to 4 octal digits that represent the file mode to be applied to the root directory of the + file system. Ex: fsMode="0754" , Here 0 before number is mandatory. This ensures specified user + of fsOwner will have rwx permissions, group will have rx permissions and others will have read + permissions. + x can also be ACL string. This also represents ACL permissions that are allowed on share directory. + fsMode contains list of ACEs. Use Commas to separate ACEs. Each ACE here contains 3 values named, + type, flag and permissions. These 3 values are separated by ':'. First ACE represents Owner, + Second ACE represents Group and third ACE represents EveryOne. These has to be represented in + order. Ex: A:fd:rwa,A:g:rwaxdnNcCoy,A:fdS:DtnNcy + type field can take only one of these values [A,D,U,L] + flag field can take one or more of these values [f,d,p,i,S,F,g] + permissions field can take one or more of these values [r,w,a,x,d,D,t,T,n,N,c,C,o,y] + Please refer 3PAR cli user guide more details on meaning of each flag. + Note: For fsMode values user can specify either of mode bits or ACL string. Both can not be used + simultaneously. 
While using fsMode it is mandatory to specify fsOwner. If Only fsMode is used + User will not be able to mount the share. This is because permissions and ownership changes are + done during the first mount call. diff --git a/config/setupcfg.py b/config/setupcfg.py index 8d1e9d85..d32e1795 100644 --- a/config/setupcfg.py +++ b/config/setupcfg.py @@ -15,12 +15,17 @@ from hpedockerplugin import configuration as conf from hpedockerplugin.hpe import hpe3par_opts as plugin_opts from oslo_log import log as logging +import logging as log from oslo_config import cfg +from logging.handlers import RotatingFileHandler host_opts = [ cfg.StrOpt('hpedockerplugin_driver', default='hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver', help='HPE Docker Plugin Driver to use for volume creation'), + cfg.StrOpt('mount_prefix', + default=None, + help='Mount prefix for volume mount'), cfg.StrOpt('host_etcd_ip_address', default='0.0.0.0', help='Host IP Address to use for etcd communication'), @@ -56,8 +61,7 @@ cfg.StrOpt('ssh_hosts_key_file', default='/root/.ssh/ssh_known_hosts', help='File containing SSH host keys for the systems with which ' - 'the plugin needs to communicate. 
OPTIONAL: ' - 'Default=$state_path/ssh_known_hosts'), + 'the plugin needs to communicate'), ] CONF = cfg.CONF @@ -70,6 +74,40 @@ def setup_logging(name, level): logging.setup(CONF, name) LOG = logging.getLogger(None) + # Add option to do Log Rotation + handler = RotatingFileHandler('/etc/hpedockerplugin/3pardcv.log', + maxBytes=10000000, backupCount=100) + formatter = log.Formatter('%(asctime)-12s [%(levelname)s] ' + '%(name)s [%(thread)d] ' + '%(threadName)s %(message)s') + + handler.setFormatter(formatter) + LOG.logger.addHandler(handler) + + if level == 'INFO': + LOG.logger.setLevel(logging.INFO) + if level == 'DEBUG': + LOG.logger.setLevel(logging.DEBUG) + if level == 'WARNING': + LOG.logger.setLevel(logging.WARNING) + if level == 'ERROR': + LOG.logger.setLevel(logging.ERROR) + + +def setup_logging_for_ut(name, level): + + logging.setup(CONF, name) + LOG = logging.getLogger(None) + + # Add option to do Log Rotation + handler = log.FileHandler('./3pardcv.log') + formatter = log.Formatter('%(asctime)-12s [%(levelname)s] ' + '%(name)s [%(thread)d] ' + '%(threadName)s %(message)s') + + handler.setFormatter(formatter) + LOG.logger.addHandler(handler) + if level == 'INFO': LOG.logger.setLevel(logging.INFO) if level == 'DEBUG': @@ -80,6 +118,12 @@ def setup_logging(name, level): LOG.logger.setLevel(logging.ERROR) +def getdefaultconfig(configfile): + CONF(configfile, project='hpedockerplugin', version='1.0.0') + configuration = conf.Configuration(host_opts, config_group='DEFAULT') + return configuration + + def get_host_config(configfile): CONF(configfile, project='hpedockerplugin', version='1.0.0') return conf.Configuration(host_opts) diff --git a/docs/active-passive-based-replication.md b/docs/active-passive-based-replication.md new file mode 100644 index 00000000..c1fb6ba6 --- /dev/null +++ b/docs/active-passive-based-replication.md @@ -0,0 +1,171 @@ +# Active/Passive Based Replication # + +In Active/Passive based replication, only one array is in active state +at 
any point of time serving the VLUNs of a given replicated volume. + +When a remote copy group (RCG) is failed over manually via 3PAR CLI to the +secondary array, the secondary array becomes active. However, the VLUNs +of the failed over volumes are still not exported by the secondary array +to the host. In order to trigger that, the container/POD running on the +host needs to be restarted. + +## Configuring replication enabled backend +**For FC Host** +```sh +host_etcd_port_number= +hpe3par_username= +hpe3par_password= +hpe3par_cpg= +hpedockerplugin_driver=hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver +logging=DEBUG +san_ip= +san_login= +san_password= +host_etcd_ip_address=[:PORT1[,IP2[:PORT2]][,IP3[:PORT3]]...] +hpe3par_api_url=https://:8080/api/v1 +replication_device = backend_id:, + replication_mode:, + cpg_map::, + snap_cpg_map:: + hpe3par_api_url:https://:8080/api/v1, + hpe3par_username:<3PAR-Username>, + hpe3par_password:<3PAR-Password>, + san_ip:<3PAR-SAN-IP>, + san_login:<3PAR-SAN-Username>, + san_password:<3PAR-SAN-Password> +``` + +*Note*: + +1. In case of asynchronous replication mode, *sync_period* field can optionally be +defined as part of *replication_device* entry and it should be between range 300 +and 31622400 seconds. If not defined, it defaults to 900 seconds. +2. *cpg_map* and *snap_cpg_map* in *replication_device* section are mandatory. If *snap_cpg_map* is not mentioned then it will be same as *cpg_map* +3. If password is encrypted for primary array, it must be encrypted for secondary array +as well using the same *pass-phrase* + + +**For ISCSI Host** +```sh +host_etcd_port_number= +hpe3par_username= +hpe3par_password= +hpe3par_cpg= +hpedockerplugin_driver=hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver +logging=DEBUG +san_ip= +san_login= +san_password= +host_etcd_ip_address=[:PORT1[,IP2[:PORT2]][,IP3[:PORT3]]...] +hpe3par_api_url=https://:8080/api/v1 +hpe3par_iscsi_ips=[,ISCSI_IP2,ISCSI_IP3...] 
+replication_device=backend_id:, +replication_device = backend_id:, + replication_mode:, + cpg_map::, + snap_cpg_map:: + hpe3par_api_url:https://:8080/api/v1, + hpe3par_username:<3PAR-Username>, + hpe3par_password:<3PAR-Password>, + san_ip:<3PAR-SAN-IP>, + san_login:<3PAR-SAN-Username>, + san_password:<3PAR-SAN-Password> + hpe3par_iscsi_ips=[;ISCSI_IP2;ISCSI_IP3...] +``` +*Note*: + +1. *cpg_map* and *snap_cpg_map* in *replication_device* section are mandatory. If *snap_cpg_map* is not mentioned then it will be same as *cpg_map* +2. *hpe3par_iscsi_ips* MUST be defined upfront for both source and target arrays. +3. *hpe3par_iscsi_ips* can be a single ISCSI IP or a list of ISCSI IPs delimited by +semi-colon. Delimiter for this field is applicable for *replication_device* section ONLY. +4. If password is encrypted for primary array, it MUST be encrypted for secondary array +as well using the same *pass-phrase*. +5. In case of asynchronous replication mode, *sync_period* field can optionally be +defined as part of *replication_device* entry and it should be between range 300 +and 31622400 seconds. If not defined, it defaults to 900 seconds. + + +## Managing Replicated Volumes ### +### Create replicated volume ### +This command allows creation of replicated volume along with RCG creation if the RCG +does not exist on the array. Newly created volume is then added to the RCG. +Existing RCG name can be used to add multiple newly created volumes to it. +```sh +$ docker volume create -d hpe --name -o replicationGroup=<3PAR_RCG_Name> [Options...] +``` +where, +- *replicationGroup*: Name of a new or existing replication copy group on 3PAR array + +One or more following *Options* can be specified additionally: +1. *size:* Size of volume in GBs +2. *provisioning:* Provision type of a volume to be created. +Valid values are thin, dedup, full with thin as default. +3. *backend:* Name of the backend to be used for creation of the volume. 
If not +specified, "DEFAULT" is used provided it is initialized successfully. +4. *mountConflictDelay:* Waiting period in seconds to be used during mount operation +of the volume being created. This happens when this volume is mounted on say Node1 and +Node2 wants to mount it. In such a case, Node2 will wait for *mountConflictDelay* +seconds for Node1 to unmount the volume. If even after this wait, Node1 doesn't unmount +the volume, then Node2 forcefully removes VLUNs exported to Node1 and then goes ahead +with the mount process. +5. *compression:* This flag specifies if the volume is a compressed volume. Allowed +values are *True* and *False*. + +#### Example #### + +**Create a replicated volume having size 1GB with a non-existing RCG using backend "ActivePassiveRepBackend"** +```sh +$ docker volume create -d hpe --name Test_RCG_Vol -o replicationGroup=Test_RCG -o size=1 -o backend=ActivePassiveRepBackend +``` +This will create volume Test_RCG_Vol along with TEST_RCG remote copy group. The volume +will then be added to the TEST_RCG. +Please note that in case of failure during the operation at any stage, previous actions +are rolled back. +E.g. if for some reason, volume Test_RCG_Vol could not be added to Test_RCG, the volume +is removed from the array. + + +### Failover a remote copy group ### + +There is no single Docker command or option to support failover of an RCG. Instead, following +steps must be carried out in order to do it: +1. On the host, the container using the replicated volume must be stopped or exited if it is running. +This triggers unmount of the volume(s) from the primary array. + +2. On the primary array, stop the remote copy group manually: +```sh +$ stoprcopygroup +``` + +3. On the secondary array, execute *failover* command: +```sh +$ setrcopygroup failover +``` + +4. Restart the container.
This time the VLUNs would be served by the failed-over or secondary array + +### Failback workflow for Active/Passive based replication ### +There is no single Docker command or option to support failback of a RCG. Instead, +following steps must be carried out in order to do it: +1. On the host, the container using the replicated volume must be stopped or exited if it is running. +This triggers unmount of the volume(s) from the failed-over or secondary array. + +2. On the secondary array, execute *recover* and *restore* commands: +```sh +$ setrcopygroup recover +$ setrcopygroup restore +``` + +3. Restart the container so that the primary array exports VLUNs to the host this time. + + +### Delete replicated volume ### +```sh +$ docker volume rm +``` +This command allows the user to delete a replicated volume. If this was the last +volume present in RCG then the RCG is also removed from the backend. + + +**See also:** +[Peer Persistence Based Replication](peer-persistence-based-replication.md) \ No newline at end of file diff --git a/docs/configuring_docker_ee_2_with_volume_plugin.md b/docs/configuring_docker_ee_2_with_volume_plugin.md new file mode 100644 index 00000000..648fc9a5 --- /dev/null +++ b/docs/configuring_docker_ee_2_with_volume_plugin.md @@ -0,0 +1,105 @@ +## Configuring HPE 3PAR Docker Volume Plugin for Docker EE 2.0 + +### Install OS (Ubuntu or CentOs) on all the nodes. + +Follow the steps to install docker Engine (EE 2.0) on all the nodes. + +### Install and enable containerized plugin on all the nodes. + Use the latest hpe.conf file and docker-compose.yml (Sample docker-compose.yml below). + +### Install UCP on master. (https://docs.docker.com/ee/ucp/admin/install/) + a. Pull the latest version of UCP + docker image pull docker/ucp:3.0.5 + b. 
Install UCP + +``` +docker container run --rm -it --name ucp \ + -v /var/run/docker.sock:/var/run/docker.sock \ + docker/ucp:3.0.5 install \ + --host-address --pod-cidr < >\ + --interactive +``` + + Example:- + + `docker container run --rm -it --name ucp -v /var/run/docker.sock:/var/run/docker.sock docker/ucp:3.0.5 install \ + --host-address 192.168.68.34 --pod-cidr 192.167.0.0/16 --interactive` + +Admin Username: {Set the user name} +Admin Password: {Set the password} + Confirm Admin Password: {Set the password} + Additional aliases: {Press Enter OR specify additional aliases if required } + Once the installation is complete ...It will display the login url + +- `mkdir -p /etc/kubernetes` +- `cp /var/lib/docker/volumes/ucp-node-certs/_data/kubelet.conf /etc/kubernetes/admin.conf` + +- Modify /etc/kubernetes/admin.conf with correct certificate-authority, server, client-certificate, client-key + +Follow all the steps to install dory/doryd on master node. + +### OPTIONAL if kubectl client is required). +``` + # Set the Kubernetes version as found in the UCP Dashboard or API + k8sversion=v1.8.11 + # Get the kubectl binary. + curl -LO https://storage.googleapis.com/kubernetes-release/release/$k8sversion/bin/linux/amd64/kubectl + # Make the kubectl binary executable. + chmod +x ./kubectl + # Move the kubectl executable to /usr/local/bin. 
+ sudo mv ./kubectl /usr/local/bin/kubectl + +export KUBERNETES_SERVICE_HOST=192.168.68.41 +export KUBERNETES_SERVICE_PORT=443 +``` + +### Sample hpe.conf + +``` +[DEFAULT] +ssh_hosts_key_file = /root/.ssh/known_hosts +logging = DEBUG +hpe3par_debug = True +suppress_requests_ssl_warnings = False +host_etcd_ip_address = 192.168.68.41 +host_etcd_port_number = 2379 +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_login = 3paradm +san_ip = 192.168.67.7 +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r6 +hpe3par_iscsi_ips = 192.168.68.201, 192.168.68.203 +mount_prefix = /var/lib/kubelet/plugins/hpe.com/3par/mounts/ +hpe3par_iscsi_chap_enabled = True +#use_multipath = True +#enforce_multipath = True +mount_conflict_delay = 30 +``` + + +### Sample docker-compose.yml +``` +hpedockerplugin: + container_name: legacy_plugin + image: dockerciuser/legacyvolumeplugin:plugin_v2 + net: host + privileged: true + volumes: + - /dev:/dev + - /run/lock:/run/lock + - /var/lib:/var/lib + - /var/run/docker/plugins:/var/run/docker/plugins:rw + - /etc:/etc + - /root/.ssh:/root/.ssh + - /sys:/sys + - /root/plugin/certs:/root/plugin/certs + - /sbin/iscsiadm:/sbin/ia + - /lib/modules:/lib/modules + - /lib/x86_64-linux-gnu:/lib64 + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/kubelet/plugins/hpe.com/3par/mounts/:/var/lib/kubelet/plugins/hpe.com/3par/mounts:rshared +``` diff --git a/docs/create_snapshot_schedule.md b/docs/create_snapshot_schedule.md index 46018e9c..25b90c32 100644 --- a/docs/create_snapshot_schedule.md +++ b/docs/create_snapshot_schedule.md @@ -29,7 +29,7 @@ docker command to create a snapshot schedule: ``` docker volume create -d hpe --name -o virtualCopyOf=volume1 -o scheduleFrequency="10 2 * * *" -o scheduleName=dailyOnceSchedule -o retentionHours=58 --o snaphotPrefix=pqr -o expHrs=5 -o retHrs=3 +-o 
snapshotPrefix=pqr -o expHrs=5 -o retHrs=3 ``` #### Note: diff --git a/docs/manual-install.md b/docs/manual-install.md index bb702d0e..ea35309f 100644 --- a/docs/manual-install.md +++ b/docs/manual-install.md @@ -65,6 +65,11 @@ sudo docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 \ -initial-cluster-state new ``` +This has to be followed by the below command so that whenever Docker service restarts, etcd is also restarted: +docker update --restart always etcd + +It is highly recommended that existing etcd installations must also run the above update command to avoid manual restarts of etcd. + NOTE: If you want to save your etcd data you'll need to use the docker -v option to specify a local directory (or external volume) to save your data. In addition, if you are configuring an etcd cluster then you need to you "existing" instead of "new" if you want a specific node to rejoing an existing cluster. For more information on setting up an etcd cluster see: diff --git a/docs/openshift-3_10_installation.md b/docs/openshift-3_10_installation.md new file mode 100644 index 00000000..00a13cf3 --- /dev/null +++ b/docs/openshift-3_10_installation.md @@ -0,0 +1,118 @@ + +## Openshift Container Platform 3.10 installation + +###Prerequisites + +To install OpenShift Container Platform, you will need: + +* At least two physical or virtual RHEL 7+ machines, with fully qualified domain names (either real world or within a network) and password-less SSH access to each other + +### Instructions + +* Modify the /etc/profile file by adding the following: + ``` + export no_proxy="localhost,127.0.0.1,localaddress,.localdomain.com " + export http_proxy= http://:/ + export https_proxy= http://:/ + ``` + +* Modify the /etc/rhsm/rhsm.conf file by adding the following: + ``` + - an http proxy server to use (enter server FQDN) + proxy_hostname = + - port for http proxy server + proxy_port = + ``` +* Modify the /etc/yum.conf file: + ``` + proxy=http://:/ + ``` + +* Run the 
following before starting the server to make OpenShift Container Platform only run on one core + ``` + $ export GOMAXPROCS=1 + ``` + +* As root on the target machines (both master and node), use subscription-manager to register the systems with Red Hat + + ``` + $ subscription-manager register + ``` + +* Pull the latest subscription data from RHSM: + ``` + $ subscription-manager refresh + ``` +* List the available subscriptions + ``` + $ subscription-manager list --available + ``` +* Find the pool ID that provides OpenShift Container Platform subscription and attach it. + ``` + $ subscription-manager attach --pool= + ``` +* Replace the string with the pool ID of the pool that provides OpenShift Container Platform. The pool ID is a long alphanumeric string + +* On both master and node, use subscription-manager to enable the repositories that are necessary in order to install OpenShift Container Platform + ``` + $ subscription-manager repos \ + --enable="rhel-7-server-rpms" \ + --enable="rhel-7-server-extras-rpms" \ + --enable="rhel-7-server-ose-3.10-rpms" \ + --enable="rhel-7-server-ansible-2.4-rpms" + + ``` + +* The installer for OpenShift Container Platform is provided by the openshift-ansible package. Install it using yum on both the master and the node + ``` + $ yum -y install wget git net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct + $ yum -y update + $ yum -y install openshift-ansible + ``` + +* Also install the docker service on master and start it + ``` + $ yum install docker-1.13.1 + $ systemctl status docker + $ systemctl enable docker + $ systemctl start docker + ``` +* Set up password-less SSH access as this is required by the installer to gain access to the machines. On the master, run the following command. + ``` + $ ssh-keygen + ``` + Follow the prompts and just hit enter when asked for pass phrase. 
+ + An easy way to distribute your SSH keys is by using a bash loop: + + ``` + $ for host in master.openshift.example.com \ + node.openshift.example.com; \ + do ssh-copy-id -i ~/.ssh/id_rsa.pub $host; \ + done + ``` + +* Create the inventory file as shown in the below link + + [Inventory Link](https://docs.openshift.com/container-platform/3.10/install/example_inventories.html#install-config-example-inventories) + + Example host file - [hosts.txt](https://github.com/hpe-storage/python-hpedockerplugin/files/2745186/hosts.txt) + + Edit the example inventory to use your host names, then save it to a file (default location is /etc/ansible/hosts) + +* Clone the openshift-ansible repository with release-3.10 branch checked out + + ``` + git clone https://github.com/openshift/openshift-ansible -b release-3.10 + ``` + +* Run the prerequisites.yml playbook using your inventory file: + ``` + $ ansible-playbook -i /openshift-ansible/playbooks/prerequisites.yml + + ``` +* Run the deploy_cluster.yml playbook using your inventory file: + ``` + $ ansible-playbook -i /openshift-ansible/playbooks/deploy_cluster.yml + ``` + diff --git a/docs/peer-persistence-based-replication.md b/docs/peer-persistence-based-replication.md new file mode 100644 index 00000000..49db1864 --- /dev/null +++ b/docs/peer-persistence-based-replication.md @@ -0,0 +1,161 @@ +# Peer Persistence based replication # +Peer Persistence feature of 3PAR provides a non-disruptive disaster recovery solution wherein in +case of disaster, the hosts automatically and seamlessly get connected to the secondary +array and start seeing the VLUNs which were earlier exported by the failed array. + +With Peer Persistence, when a Docker user mounts a replicated volume(s), HPE 3PAR Docker +Plugin creates VLUNs corresponding to the replicated volume(s) on BOTH +the arrays. However, they are served only by the active array with the other array being on +standby mode. 
When the corresponding RCG is switched over or primary array goes down, +the secondary array takes over and makes the VLUN(s) available. After switchover, the +active array goes in standby mode while the other array becomes active. + +**Pre-requisites** +1. Remote copy setup is up and running +2. Quorum Witness is running with primary and secondary arrays registered with it +3. Multipath daemon is running so that non-disruptive seamless mounting of VLUN(s) +on the host is possible. + + +## Configuring replication enabled backend +Compared to Active/Passive configuration, in Peer Persistence, the ONLY discriminator +is the presence of *quorum_witness_ip* sub-field under *replication_device* field - +rest of the fields are applicable. + +**For FC Host** + +```sh +host_etcd_port_number= +hpe3par_username= +hpe3par_password= +hpe3par_cpg= +hpedockerplugin_driver=hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver +logging=DEBUG +san_ip= +san_login= +san_password= +host_etcd_ip_address=[:PORT1[,IP2[:PORT2]][,IP3[:PORT3]]...] +hpe3par_api_url=https://:8080/api/v1 +replication_device = backend_id:, + quorum_witness_ip:, + replication_mode:synchronous, + cpg_map::, + snap_cpg_map:: + hpe3par_api_url:https://:8080/api/v1, + hpe3par_username:<3PAR-Username>, + hpe3par_password:<3PAR-Password>, + san_ip:<3PAR-SAN-IP>, + san_login:<3PAR-SAN-Username>, + san_password:<3PAR-SAN-Password> +``` + +**Note:** + +1. *replication_mode* MUST be set to *synchronous* as a pre-requisite for Peer +Persistence based replication. +2. *cpg_map* and *snap_cpg_map* in *replication_device* section are mandatory. If *snap_cpg_map* is not mentioned then it will be same as *cpg_map* +3. 
If password is encrypted for primary array, it must be encrypted for secondary array +as well using the same *pass-phrase* + +**For ISCSI Host** +```sh +host_etcd_port_number= +hpe3par_username= +hpe3par_password= +hpe3par_cpg= +hpedockerplugin_driver=hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver +logging=DEBUG +san_ip= +san_login= +san_password= +host_etcd_ip_address=[:PORT1[,IP2[:PORT2]][,IP3[:PORT3]]...] +hpe3par_api_url=https://:8080/api/v1 +hpe3par_iscsi_ips=[,ISCSI_IP2,ISCSI_IP3...] +replication_device=backend_id:, +replication_device = backend_id:, + quorum_witness_ip:, + replication_mode:synchronous, + cpg_map::, + snap_cpg_map:: + hpe3par_api_url:https://:8080/api/v1, + hpe3par_username:<3PAR-Username>, + hpe3par_password:<3PAR-Password>, + san_ip:<3PAR-SAN-IP>, + san_login:<3PAR-SAN-Username>, + san_password:<3PAR-SAN-Password> + hpe3par_iscsi_ips=[;ISCSI_IP2;ISCSI_IP3...] +``` +*Note*: + +1. *cpg_map* and *snap_cpg_map* in *replication_device* section are mandatory. If *snap_cpg_map* is not mentioned then it will be same as *cpg_map* +2. *hpe3par_iscsi_ips* MUST be defined upfront for both source and target arrays. +3. *hpe3par_iscsi_ips* can be a single ISCSI IP or a list of ISCSI IPs delimited by +semi-colon. Delimiter for this field is applicable for *replication_device* section ONLY. +4. If password is encrypted for primary array, it must be encrypted for secondary array +as well using the same *pass-phrase* +5. *replication_mode* MUST be set to *synchronous* as a pre-requisite for Peer +Persistence based replication. + +## Managing Replicated Volumes ### + +### Create replicated volume ### +This command allows creation of replicated volume along with RCG creation if the RCG +does not exist on the array. Newly created volume is then added to the RCG. +Existing RCG name can be used to add multiple newly created volumes to it. +```sh +docker volume create -d hpe --name -o replicationGroup=<3PAR_RCG_Name> [Options...] 
+``` +where, +- *replicationGroup*: Name of a new or existing replication copy group on 3PAR array + +One or more following *Options* can be specified additionally: +1. *size:* Size of volume in GBs +2. *provisioning:* Provision type of a volume to be created. +Valid values are thin, dedup, full with thin as default. +3. *backend:* Name of the backend to be used for creation of the volume. If not +specified, "DEFAULT" is used provided it is initialized successfully. +4. *mountConflictDelay:* Waiting period in seconds to be used during mount operation +of the volume being created. This happens when this volume is mounted on say Node1 and +Node2 wants to mount it. In such a case, Node2 will wait for *mountConflictDelay* +seconds for Node1 to unmount the volume. If even after this wait, Node1 doesn't unmount +the volume, then Node2 forcefully removes VLUNs exported to Node1 and then goes ahead +with the mount process. +5. *compression:* This flag specifies if the volume is a compressed volume. Allowed +values are *True* and *False*. + +#### Example #### + +**Create a replicated volume having size 1GB with a non-existing RCG using backend "ActivePassiceRepBackend"** +```sh +$ docker volume create -d hpe --name Test_RCG_Vol -o replicationGroup=Test_RCG -o size=1 -o backend=ActivePassiceRepBackend +``` +This will create volume Test_RCG_Vol along with TEST_RCG remote copy group. The volume +will then be added to the TEST_RCG. +Please note that in case of failure during the operation at any stage, previous actions +are rolled back. +E.g. if for some reason, volume Test_RCG_Vol could not be added to Test_RCG, the volume +is removed from the array. + + +### Switchover a remote copy group ### +There is no single Docker command or option to support switchover of a RCG from one +array to the other. Instead, the following 3PAR command must be executed. 
+ +```sh +$ setrcopygroup switchover +``` +where: +- *RCG_Name* is the name of remote copy group on the array where the above command is executed. + +Having done the switchover, multipath daemon takes care of seamless mounting of volume(s) from the +switched over array. + +### Delete replicated volume ### +This command allows user to delete a replicated volume. If this is the last volume +present in RCG then the RCG is also removed from the backend. +```sh +docker volume rm +``` + +**See also:** +[Active/Passive Based Replication](active-passive-based-replication.md) \ No newline at end of file diff --git a/docs/replication.md b/docs/replication.md new file mode 100644 index 00000000..38175677 --- /dev/null +++ b/docs/replication.md @@ -0,0 +1,49 @@ +# Replication: HPE 3PAR Docker Storage Plugin # + +This feature allows Docker users to create replicated volume(s) using +HPE 3PAR Storage Plugin. Docker CLI does not directly support +replication. HPE 3PAR Storage Plugin extends Docker's "volume create" +command interface via optional parameter in order to make it possible. + +HPE 3PAR Storage Plugin assumes that an already working 3PAR Remote +Copy setup is present. The plugin has to be configured with the +details of this setup in a configuration file called hpe.conf. + +On the 3PAR front, core to the idea of replication is the concept of +remote copy group (RCG) that aggregates all the volumes that need to +be replicated simultaneously to a remote site. + +HPE 3PAR Storage Plugin extends Docker's "volume create" command via +optional parameter 'replicationGroup'. This represents the name of the +RCG on 3PAR which may or may not exist. In the former case, it gets +created and the new volume is added to it. In the latter case, the +newly created volume is added to the existing RCG. + +'replicationGroup' flag is effective only if the backend in +the configuration file hpe.conf has been configured as a +replication-enabled backend. 
Multiple backends with different +permutations and combinations can be configured. + +**Note:** + +1. For a replication-enabled backend, it is mandatory to specify +'replicationGroup' option while creating replicated volume. +2. User cannot create non-replicated/standard volume(s) using +replication-enabled backend. In order to do so, user would need to +define another backend in hpe.conf with similar details as that of +replication-enabled backend except that "replication_device" field is +omitted. +3. For a backend that is NOT replication-enabled, specifying 'replicationGroup' +is incorrect and results in error. +4. For a given RCG, mixed transport protocol is not supported. E.g. volumes v1, v2 and v3 + are part of RCG called TestRCG, then on primary array, if these volumes are exported via + FC protocol then on secondary array those CANNOT be exported via ISCSI (after failover) + and vice versa. +5. Cold remote site (e.g. ISCSI IPs on remote array not configured) is not supported. +For ISCSI based transport protocol, the ISCSI IPs on both primary and secondary arrays +MUST be defined upfront in hpe.conf. + +HPE 3PAR Docker Storage Plugin supports two types of replication the details of +which can be found at: +1. [Active/Passive Based Replication](active-passive-based-replication.md) and +2. [Peer Persistence Based Replication](peer-persistence-based-replication.md). diff --git a/docs/secret-management.md b/docs/secret-management.md index 33ee40fa..e75b96b3 100644 --- a/docs/secret-management.md +++ b/docs/secret-management.md @@ -2,30 +2,40 @@ This section describes the steps that need to be taken in order to use secrets in encrypted format rather than plain text. -### Using encryption_utility.py +### Using Encryption utility -To encrypt the password use encryption_utility.py. User need to wget the encryption_utility.py +To encrypt the password user need to use a python package, "py-3parencryptor". 
+This package can be installed using the below command on linux machine -#### Pre-requisite +```` +$ pip install py-3parencryptor -- Below python packages should be installed on host machine to run the encryption utility -``` - py-etcd - pycrypto ```` + +#### Pre-requisite + - hpe.conf should be present in /etc/hpedockerplugin/ path with etcd details in it. - etcd should be running - 3PAR plugin should be disabled -#### Running the utility with -a option +#### About the package + +When py-3parencryptor is installed on machine. It can be used with the help of hpe3parencryptor command like below. +You have to use the same passphrase to encrypt all the passwords for a backend. +There can be 4 possible password: +1. hpe3par_password +2. san_password +3. hpe3par_password for replication array +4. san_password for replication array. + +After generating the password replace the password with encrypted one. -In order to get the encrypted password user need to run the utility with -a option as below ```` -#python encryption_utility.py -a +#hpe3parencryptor -a Example: -#python encryption_utility.py -a "@123#" "password" +#hpe3parencryptor -a "@123#" "password" SUCCESSFUL: Encrypted password: +CB1E8Je1j8= ```` @@ -36,11 +46,35 @@ Use the encrypted password generated by utility as hpe3par_password in hpe.conf enable the plugin now #### Running the utility with -d option -If user wants to remove the current encrypted password and replace it with plain text or new encrypted password, + If user wants to remove the current encrypted password and replace it with plain text or new encrypted password, user need to delete the current password by using -d option in the utility. ```` -# python encryption_utility.py -d +# hpe3parencryptor -d Key Successfully deleted ```` +## For Multiple backend + +### Encrypting a specific backend +- When multiple backend present in the configuration file(hpe.conf). User can use the utility to encrypt the password on backend basis. 
+- With --backend option user can provide the backend for which backend they want to encrypt the passwords. + +```` +#hpe3parencryptor -a --backend + +```` +### Removing encrypted password from a specific backend + +Users can remove the encrypted password of a specific backend. Users can use the utility to delete that. +There is an additional optional argument with -d, --backend. + +```` +# hpe3parencryptor -d --backend + +```` + +#### Note : +```` +If --backend is not used, in both the case (-a and -d), package will take the default backend for performing the operations. +```` diff --git a/docs/share_usage.md b/docs/share_usage.md new file mode 100644 index 00000000..fe49305c --- /dev/null +++ b/docs/share_usage.md @@ -0,0 +1,325 @@ +# File Persona usage guide + +The HPE 3PAR File Persona feature allows user to manage file shares on 3PAR +arrays through Docker interface. It supports basic create, retrieve, delete, +mount and unmount operations. Usage details of how each operation can be +exercised via Docker CLI is described below. + +## Prerequisites +1. HPE 3PAR OS version must be >= 3.3.1 (MU3) +2. Must have File Persona (102400G) license +3. 
File Service must be configured on the array + +## Configuring backend for file share +In order to use HPE 3PAR File Persona feature, user needs to +configure a backend one for each target array as below: + + +```sh +[DEFAULT] + +# ssh key file required for driver ssh communication with array +ssh_hosts_key_file = /root/.ssh/known_hosts + +# IP Address and port number of the ETCD instance +# to be used for storing the share meta data +host_etcd_ip_address = xxx.xxx.xxx.xxx +host_etcd_port_number = 2379 + +# Client certificate and key details for secured ETCD cluster +# host_etcd_client_cert = /root/plugin/certs/client.pem +# host_etcd_client_key = /root/plugin/certs/client-key.pem + +# Logging level for the plugin logs +logging = DEBUG + +# Logging level for 3PAR client logs +hpe3par_debug = True + +# Suppress Requests Library SSL warnings +suppress_requests_ssl_warnings = True + +# Set the driver to be File driver +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_file.HPE3PARFileDriver + +hpe3par_api_url = https://xxx.xxx.xxx.xxx:8080/api/v1 +hpe3par_username = +hpe3par_password = +hpe3par_san_ip = xxx.xxx.xxx.xxx +hpe3par_san_login = +hpe3par_san_password = + +# Server IP pool is mandatory and can be specified as a mix of range of IPs and +# individual IPs delimited by comma +# Each range or individual IP must be followed by the corresponding subnet mask +# delimited by semi-colon +# E.g.: IP-Range:Subnet-Mask,Individual-IP:SubnetMask… +hpe3par_server_ip_pool = xxx.xxx.xxx.xxx-xxx.xxx.xxx.yyy:255.255.255.0 + +# Override default size of FPG here. It must be in the range 1TiB – 64TiB. If +# not specified here, it defaults to 64 +hpe3par_default_fpg_size = 10 +``` +User can define multiple backends in case more than one array needs to be managed +by the plugin. + +User can also define backends for block driver(s) along with file driver(s). +However, a default backend is mandatory for both block and file drivers for the +default use cases to work. 
Since ‘DEFAULT’ section can be consumed by either +block or file driver but not both at the same time, the other driver +is left out without a default backend. In order to satisfy the need for the other +driver to have default backend, HPE 3PAR Plugin introduces two new keywords to +denote default backend names to be used in such a situation: +1. DEFAULT_FILE and +2. DEFAULT_BLOCK + +In case where user already has ‘DEFAULT’ backend configured for block driver, +and file driver also needs to be configured, then ‘DEFAULT_FILE’ backend MUST +be defined. In this case, if there is a non-default backend defined for file +driver without 'DEFAULT_FILE' backend defined, plugin won't get initialized +properly. + +E.g. in the below configuration, we have two backends, first one for block and +the second one for file. As you can see, default backend is missing for the file +driver. Due to this, the driver will fail to initialize. +``` +[DEFAULT] +... +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver +... + +[3PAR_FILE] +... +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_file.HPE3PARFileDriver +... +``` +Similar is the vice-versa case, where ‘DEFAULT’ is configured as file driver +and the user wants to configure block driver as well. In this case, ‘DEFAULT_BLOCK’ +MUST be configured for the plugin to work correctly. 
+ +Below is that table of all possible default configurations along +with the behavior column for each combination: + +|DEFAULT | DEFAULT_BLOCK | DEFAULT_FILE | BEHAVIOR | +|--------|---------------|--------------|-----------------| +|BLOCK |-- |-- | Okay | +|FILE |-- |-- | Okay | +|-- |BLOCK |-- |DEFAULT_BLOCK becomes the default for Block driver| +|-- |-- |FILE |DEFAULT_FILE becomes the default for File driver| +|BLOCK |-- |FILE |DEFAULT_FILE becomes the default for File driver| +|FILE |BLOCK |-- |DEFAULT_BLOCK becomes the default for Block driver| +|BLOCK |BLOCK |FILE |DEFAULT_BLOCK becomes like any other non-default backend in multi-backend configuration for Block driver. DEFAULT_FILE becomes the default for File driver| +|FILE |BLOCK |FILE |DEFAULT_FILE becomes like any other non-default backend in multi-backend configuration for File driver. DEFAULT_BLOCK becomes the default for Block driver| +|BLOCK |FILE |-- |DEFAULT_BLOCK is not allowed to be configured for File driver. Plugin initialization fails in this case.| +|FILE |-- |BLOCK |DEFAULT_FILE is not allowed to be configured for Block driver. Plugin initialization fails in this case.| + + +Although HPE does not recommend it, but if the user configures multiple backends +that are identical in terms of target array and CPG, then the default FPG +created for such backends would not be the same – rather a different default +FPG would be created for each backend. + +## Command to create HPE share +```sh +$ docker volume create –d hpe --name <-o filePersona> +[ -o size= –o cpg= -o fpg= + -o fsOwner= -o fsMode= ] +``` + +**Where:** + +- ***size:*** optional parameter which specifies the desired size of the share in GiB. By default it is 1024 GiB. +- ***cpg:*** optional parameter which specifies the cpg to be used for the share. This parameter can be used with or without ‘fpg’ option. When used with ‘fpg’, the FPG is created with the specified name if it doesn’t exist. 
If it does exist, then share is created under it. When used without ‘fpg’ option, default FPG under the specified CPG is selected for share creation. If default FPG doesn’t exist, a new default FPG is created under which the share is created. +- ***fpg:*** optional parameter which specifies the FPG to be used for share creation. If the FPG does not exist, a new FPG with the specified name is created using either the CPG specified using ‘cpg’ option or specified in configuration file. +- ***fsOwner:*** optional parameter which specifies the user-id and group-id that should own the root directory of NFS file share in the form [userId:groupId]. Administrator must ensure that local user and local group with these IDs are present on 3PAR before trying to mount the share otherwise mount will fail. +- ***fsMode:*** optional parameter which specifies the permissions whose value is 1 to 4 octal digits + representing the file mode to be applied to the root directory of the file system. + Ex: fsMode="0754". Here 0 as the first digit is mandatory. This ensures specified user of + fsOwner will have rwx permissions, group will have r-x permissions and others will have + just the read permission. + fsMode can also be an ACL string representing ACL permissions that are applied on the share + directory. It contains three ACEs delimited by comma with each ACE consisting of three + parts: + + 1. type, + 2. flag and + 3. permissions + + These three parts are delimited by semi-colon. + Out of the three ACEs in the ACL, the first ACE represents the ‘owner’, second one the ‘group’ and the + third one ‘everyone’ to be specified in the same order. 
+ + E.g.: ```sh A:fd:rwa,A:g:rwaxdnNcCoy,A:fdS:DtnNcy``` + + * type field can take only one of these values [A,D,U,L] + * flag field can take one or more of these values [f,d,p,i,S,F,g] + * permissions field can take one or more of these values [r,w,a,x,d,D,t,T,n,N,c,C,o,y] + + Please refer 3PAR CLI user guide more details on meaning of each flag. + **Note:** For fsMode values user can specify either of mode bits or ACL string. Both cannot be used + simultaneously. While using fsMode it is mandatory to specify fsOwner. If Only fsMode is used, user + will not be able to mount the share. + +### Creating default HPE share +``` +docker volume create -d hpe --name -o filePersona +``` +This command creates share of default size 1TiB with name ‘share_name’ on +default FPG. If default FPG is not present, then it is created on the CPG +specified in configuration file hpe.conf. If ‘hpe3par_default_fpg_size’ is +defined in hpe.conf, then FPG is created with the specified size. Otherwise, +FPG is created with default size of 16TiB. + +Please note that FPG creation is a long operation which takes around 3 minutes +and hence it is done asynchronously on a child thread. User must keep inspecting +the status of the share which is in 'CREATING' state during this time. Once the +FPG, VFS and file store are created and quota is applied, the status of share is +set to 'AVAILABLE' state. User is not allowed to do any operations while the +share is in 'CREATING' state. + +If for some reason a failure is encountered, the status of the share is set +to 'FAILED' state and the reason for failure can be seen by inspecting the share. + +A share in 'FAILED' state can be removed. + +**Note:** ‘size’ can be specified to override the default share size of 1TiB. + + +### Creating a share using non-default CPG + +``` +docker volume create -d hpe --name -o filePersona -o cpg= +``` +This command creates share of default size 1TiB on the default FPG whose parent CPG is ‘cpg_name’. 
If +default FPG is not present, it is created on CPG ‘cpg_name’ with size ‘hpe3par_default_fpg_size’ if it +is defined in hpe.conf. Else its size defaults to 16TiB. + +**Note:** ‘size’ can be specified to override the default share size of 1TiB. + + +### Creating a share using non-default or legacy FPG +``` +docker volume create -d hpe --name -o filePersona -o fpg= +``` +This command creates a share of default size of 1TiB on the specified FPG ‘fpg_name’. +The specified FPG 'fpg_name' may or may not exist. + +When this command is executed the plugin does the following: +1. If the FPG 'fpg_name' exists and is Docker managed, share is created under + it provided that enough space is available on the FPG to accommodate the + share. +2. If the FPG 'fpg_name' exists and is a legacy FPG, share is created under it + provided that enough space is available on the FPG to accommodate the share +3. If the FPG 'fpg_name' does not exist, it is created with size + 'hpe3par_default_fpg_size' configured in hpe.conf provided none of the 3PAR + limits are hit. Post FPG creation, share is created under it. + +If enough space is not there or any 3PAR limit is hit, the status of share is +set to 'FAILED' along with appropriate error message which can be seen while +inspecting the share details. + +**Note:** ‘size’ can be specified to override the default share size of 1TiB. + +### Creating a share on a non-default FPG and CPG +``` +docker volume create -d hpe --name -o filePersona -o fpg= -o cpg= +``` +This command creates a share of default size of 1TiB on the specified FPG ‘fpg_name’. +The specified FPG 'fpg_name' may or may not exist. + +When this command is executed the plugin does the following: +1. If the FPG 'fpg_name' exists and it is Docker managed and the specified + CPG 'cpg_name' matches with parent CPG of FPG 'fpg_name', share is created + under it provided that enough space is available on the FPG to accommodate + the share. 
If specified CPG 'cpg_name' does not match, share creation fails + with appropriate error. +2. If the FPG 'fpg_name' exists and it is a legacy FPG and the specified CPG + 'cpg_name' matches with the parent CPG of FPG 'fpg_name', share is created + under it provided that enough space is available on the FPG to accommodate + the share. If specified CPG 'cpg_name' does not match, share creation fails + with appropriate error. +3. If the FPG 'fpg_name' does not exist, it is created with size + 'hpe3par_default_fpg_size' configured in hpe.conf provided none of the 3PAR + limits are hit. Post FPG creation, share is created under it. + +If enough space is not there or any 3PAR limit is hit, the status of share is +set to 'FAILED' along with appropriate error message which can be seen while +inspecting the share details. + +**Note:** +1. ‘size’ can be specified to override the default share size of 1TiB. +2. The FPG must have enough capacity to accommodate the share. + +### Mounting a share +``` +docker run -it --rm --mount src=,dst=,volume-driver=hpe --name alpine /bin/sh +``` + +This command allows mounting of share 'share-name' inside the container 'container-name' on mount +directory 'mount-dir' using alpine image. A share can be mounted multiple times +on the same host or different hosts that have access to the share. A share that +is mounted multiple times on a host is unmounted only after the last container +mounting it is exited or stopped. + +Permissions if present are applied after mounting the share. + +**Note:** VFS IPs must be reachable from Docker host for share to be mounted successfully. + +### Un-mounting a share +If container shell prompt is there, simply type 'exit' to unmount the share. 
+If container is in detached mode, then retrieve container ID using +```docker ps -a``` and simply type: +``` +docker stop +``` + +### Inspecting a share +``` +docker volume inspect +``` +Displays details of the share being inspected + +### Listing shares +``` +docker volume ls +``` +Lists all the shares + +### Removing a share +``` +docker volume rm +``` +This command allows removing a share. If the share being removed happens to be +the last share under its parent FPG, then the parent FPG is also removed which +happens asynchronously on a child thread. + +**Note:** Any user data present on the share will be lost post this operation. + +### Displaying help +``` +docker volume create -d hpe -o filePersona -o help +``` +This command displays help content of the file command with possible options that +can be used with it. + +### Displaying backend initialization status +``` +docker volume create -d hpe -o filePersona -o help=backends +``` +This command displays the initialization status of all the backends that have +been configured for file driver. + +## Known behavior / issues +1. All the operations must be performed sequentially. E.g. concurrent creation + of multiple shares can lead to ETCD lock failures. +2. When block related configuration parameters are used inadvertently in file + driver configuration or vice-versa, it does not result in any error - the + plugin simply ignores it. Eg: snapcpg, a block configuration parameter, + when used in file driver configuration, it is ignored. +3. When both 'DEFAULT' and 'DEFAULT_BLOCK' backends are defined as block driver, + 'DEFAULT_BLOCK' is not treated as a special keyword. Rather it becomes like + any other backend defined in a multi-backend configuration. Same goes when + 'DEFAULT' and 'DEFAULT_FILE' are defined as file driver. +4. 
When two backend sections are identically defined, even then each backend + is treated differently and results in having their individual default FPGs + when default share creation is done using both the backends. diff --git a/docs/suse caasp/README.md b/docs/suse caasp/README.md index fad3e17a..a702ead4 100644 --- a/docs/suse caasp/README.md +++ b/docs/suse caasp/README.md @@ -142,17 +142,15 @@ Installing the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in) for SU **HPE 3PAR Fiber Channel:** - + - >**Note:** Step 4 is needed for now, since we have not published the latest image - >to the Docker public registry. Once we have published the image, this - > is no longer necessary. +4. Either you can build the container image by following instructions in step 5 below, or use a pre-existing 2.1 image of the plugin container by substituting `image: hpestorage/legacyvolumeplugin:2.1-suse` in docker-compose.yml given in step 6 -4. **Build the containerized image** +5. **Build the containerized image** ```bash - $ git clone https://github.com/hpe-storage/python-hpedockerplugin.git ~/container_code + $ git clone https://github.com/hpe-storage/python-hpedockerplugin.git ~/container_code $ cd ~/container_code - $ git checkout plugin_v2 + $ git checkout v210 $ ./containerize.sh ``` Observe the built container image by docker images command @@ -162,7 +160,7 @@ Installing the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in) for SU REPOSITORY TAG IMAGE ID CREATED SIZE hpe-storage/python-hpedockerplugin plugin_v2 9b540a18a9b2 4 weeks ago 239MB ``` -5. **Deploy the HPE 3PAR Volume Plug-In for Docker** +6. 
**Deploy the HPE 3PAR Volume Plug-In for Docker** ```bash $ cd ~ @@ -175,6 +173,7 @@ Installing the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in) for SU image: hpe-storage/python-hpedockerplugin:plugin_v2 container_name: volplugin net: host + restart: always privileged: true volumes: - /dev:/dev @@ -197,7 +196,7 @@ Installing the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in) for SU > **Note: Please make sure etcd service in running state.** -6. **Start the HPE 3PAR Volume Plug-in for Docker +7. **Start the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in)** Make sure you are in the location of the **docker-compose.yml** file @@ -299,6 +298,14 @@ Installing the HPE 3PAR Volume Plug-in for Docker (Containerized Plug-in) for SU working installation of SUSE CaaS integrated with HPE 3PAR Volume Plug-in for Docker** +##### Known Issues: + +All the known issues regarding plugin can be found at the link +below: + +**Right now the containerized plugin on SUSE CaaS platform is qualified on Fibre Channel Driver only.** +On iSCSI Driver, there is still an outstanding open issue -- https://github.com/hpe-storage/python-hpedockerplugin/issues/198 + Usage of the HPE 3PAR Volume Plug-in for Docker in Kubernetes/SUSE CaaS ======================================================================= @@ -475,10 +482,6 @@ features refer: -##### Known Issues: - -All the known issues regarding plugin can be found at the link -below: Learn more visit diff --git a/docs/usage.md b/docs/usage.md index 1e33738c..3f111503 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -56,6 +56,18 @@ Note -- 'vvset_name' should be present in 3par docker volume create -d hpe --name -o importVol=<3par_volume|3par_snapshot> ``` +#### Displaying help + +``` +docker volume create -d hpe -o help +``` + +#### Displaying available backends with their status + +``` +docker volume create -d hpe -o help=backends +``` + #### Deleting a volume ``` @@ -119,9 +131,14 @@ Use the following 
command to mount a volume and start a bash prompt: docker run -it -v :// --volume-driver hpe bash ``` -Note: If the volume does not exist it will be created. here can be both snapshot (or) a base volume created by the plugin. +Note: +1. If the volume does not exist it will be created. +2. Volume created through this command will always be via backend 'DEFAULT'. +3. If the backend 'DEFAULT' is replication enabled and volume doesn't exist, this command will not succeed. + Hence it is highly recommended that 'DEFAULT' backend is not replication enabled. + The image used for mounting can be any image located on https://hub.docker.com/ or the local filesystem. See https://docs.docker.com/v1.8/userguide/dockerimages/ for more details. diff --git a/hpedockerplugin/backend_async_initializer.py b/hpedockerplugin/backend_async_initializer.py new file mode 100644 index 00000000..be1d46a8 --- /dev/null +++ b/hpedockerplugin/backend_async_initializer.py @@ -0,0 +1,67 @@ +# (c) Copyright [2016] Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Class starts a thread for each backend defined in hpe.conf +for asynchronous initialization and reports the status of +initialization via the manager_objs back to the caller. 
+ +""" + +import six +import threading +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +class BackendInitializerThread(threading.Thread): + def __init__(self, orchestrator, manager_objs, + host_config, + config, + etcd_util, + node_id, + backend_name): + threading.Thread.__init__(self) + self.orchestrator = orchestrator + self.manager_objs = manager_objs + self.backend_name = backend_name + self.host_config = host_config + self.config = config + self.etcd_util = etcd_util + self.node_id = node_id + + def run(self): + LOG.info("Starting initializing backend " + self.backend_name) + + volume_mgr = {} + try: + volume_mgr_obj = self.orchestrator.get_manager( + self.host_config, + self.config, + self.etcd_util, + self.node_id, + self.backend_name) + volume_mgr['mgr'] = volume_mgr_obj + volume_mgr['backend_state'] = 'OK' + + except Exception as ex: + volume_mgr['mgr'] = None + volume_mgr['backend_state'] = 'FAILED' + LOG.error('CHILD-THREAD: INITIALIZING backend: %s FAILED Error:' + '%s' % (self.backend_name, six.text_type(ex))) + finally: + LOG.info('in finally : %s , %s ' % (self.backend_name, + volume_mgr)) + self.manager_objs[self.backend_name] = volume_mgr diff --git a/hpedockerplugin/backend_orchestrator.py b/hpedockerplugin/backend_orchestrator.py index 813d6dfc..86d6f115 100644 --- a/hpedockerplugin/backend_orchestrator.py +++ b/hpedockerplugin/backend_orchestrator.py @@ -25,9 +25,17 @@ """ +import abc +import json from oslo_log import log as logging -import hpedockerplugin.etcdutil as util +import os +import six +import uuid import hpedockerplugin.volume_manager as mgr +import hpedockerplugin.etcdutil as util +import threading +import hpedockerplugin.backend_async_initializer as async_initializer +from twisted.internet import threads LOG = logging.getLogger(__name__) @@ -35,51 +43,218 @@ class Orchestrator(object): - def __init__(self, host_config, backend_configs): + def __init__(self, host_config, backend_configs, 
def_backend_name): LOG.info('calling initialize manager objs') - self.etcd_util = self._get_etcd_util(host_config) + self._def_backend_name = def_backend_name + self._etcd_client = self._get_etcd_client(host_config) + self._initialize_orchestrator(host_config) self._manager = self.initialize_manager_objects(host_config, backend_configs) + # This is the dictionary which have the volume -> backend map entries + # cache after doing an etcd volume read operation. + self.volume_backends_map = {} + self.volume_backend_lock = threading.Lock() @staticmethod - def _get_etcd_util(host_config): - return util.EtcdUtil( - host_config.host_etcd_ip_address, - host_config.host_etcd_port_number, - host_config.host_etcd_client_cert, - host_config.host_etcd_client_key) + def _initialize_orchestrator(host_config): + pass + + def get_default_backend_name(self): + return self._def_backend_name + + @abc.abstractmethod + def _get_etcd_client(self, host_config): + pass + + @staticmethod + def _get_node_id(): + # Save node-id if it doesn't exist + node_id_file_path = '/etc/hpedockerplugin/.node_id' + if not os.path.isfile(node_id_file_path): + node_id = str(uuid.uuid4()) + with open(node_id_file_path, 'w') as node_id_file: + node_id_file.write(node_id) + else: + with open(node_id_file_path, 'r') as node_id_file: + node_id = node_id_file.readline() + + return node_id def initialize_manager_objects(self, host_config, backend_configs): manager_objs = {} + node_id = self._get_node_id() for backend_name, config in backend_configs.items(): - LOG.info('INITIALIZING backend : %s' % backend_name) - manager_objs[backend_name] = mgr.VolumeManager(host_config, - config, - self.etcd_util, - backend_name) + try: + LOG.info('INITIALIZING backend: %s asynchronously' + % backend_name) + + # First initialize the manager_objs key with state as + # INITIALIZING + volume_mgr = {} + volume_mgr['backend_state'] = 'INITIALIZING' + volume_mgr['mgr'] = None + manager_objs[backend_name] = volume_mgr + + thread = 
async_initializer.BackendInitializerThread( + self, + manager_objs, + host_config, + config, + self._etcd_client, + node_id, + backend_name + ) + thread.start() + + except Exception as ex: + LOG.error('MAIN-THREAD: INITIALIZING backend: %s FAILED ' + 'Error: %s' + % (backend_name, six.text_type(ex))) + + LOG.info("Backends INITIALIZED => %s" % manager_objs.keys()) return manager_objs def get_volume_backend_details(self, volname): LOG.info('Getting details for volume : %s ' % (volname)) - vol = self.etcd_util.get_vol_byname(volname) - current_backend = DEFAULT_BACKEND_NAME - if vol is not None and 'backend' in vol: - current_backend = vol['backend'] + if volname in self.volume_backends_map: + current_backend = self.volume_backends_map[volname] + LOG.debug(' Returning the backend details from cache %s , %s' + % (volname, current_backend)) + return current_backend + else: + return self.add_cache_entry(volname) - return current_backend + def add_cache_entry(self, volname): + # Using this style of locking + # https://docs.python.org/3/library/threading.html + self.volume_backend_lock.acquire() + try: + vol = self.get_meta_data_by_name(volname) + if vol is not None and 'backend' in vol: + current_backend = vol['backend'] + # populate the volume backend map for caching + LOG.debug(' Populating cache %s, %s ' + % (volname, current_backend)) + self.volume_backends_map[volname] = current_backend + return current_backend + else: + # throw an exception for the condition + # where the backend can't be read from volume + # metadata in etcd + LOG.info(' vol obj read from etcd : %s' % vol) + return self._def_backend_name + finally: + self.volume_backend_lock.release() - def volumedriver_remove(self, volname): + def _execute_request_for_backend(self, backend_name, request, volname, + *args, **kwargs): + LOG.info(' Operating on backend : %s on volume %s ' + % (backend_name, volname)) + LOG.info(' Request %s ' % request) + LOG.info(' with args %s ' % str(args)) + LOG.info(' with 
kwargs is %s ' % str(kwargs)) + volume_mgr_info = self._manager.get(backend_name) + if volume_mgr_info: + volume_mgr = volume_mgr_info['mgr'] + if volume_mgr is not None: + # populate the volume backend map for caching + return getattr(volume_mgr, request)(volname, *args, **kwargs) + msg = "ERROR: Backend '%s' was NOT initialized successfully." \ + " Please check hpe.conf for incorrect entries and rectify " \ + "it." % backend_name + LOG.error(msg) + return json.dumps({u'Err': msg}) + + def __undeferred_execute_request__(self, request, volname, + *args, **kwargs): backend = self.get_volume_backend_details(volname) - return self._manager[backend].remove_volume(volname) + return self._execute_request_for_backend( + backend, + request, + volname, + *args, + **kwargs + ) - def volumedriver_unmount(self, volname, vol_mount, mount_id): + def _execute_request(self, request, volname, *args, **kwargs): backend = self.get_volume_backend_details(volname) - return self._manager[backend].unmount_volume(volname, - vol_mount, - mount_id) + d = threads.deferToThread(self._execute_request_for_backend, + backend, + request, + volname, + *args, + **kwargs) + d.addCallback(self.callback_func) + d.addErrback(self.error_callback_func) + return d + + def callback_func(self, response): + return response + + def error_callback_func(self, response): + LOG.info('In error_callback_func: error is %s' + % six.text_type(response)) + + @abc.abstractmethod + def get_manager(self, host_config, config, etcd_util, + node_id, backend_name): + pass + + @abc.abstractmethod + def get_meta_data_by_name(self, name): + pass + + +class VolumeBackendOrchestrator(Orchestrator): + def __init__(self, host_config, backend_configs, def_backend_name): + super(VolumeBackendOrchestrator, self).__init__( + host_config, backend_configs, def_backend_name) + + def _get_etcd_client(self, host_config): + # return util.HpeVolumeEtcdClient( + return util.EtcdUtil( + host_config.host_etcd_ip_address, + 
host_config.host_etcd_port_number, + host_config.host_etcd_client_cert, + host_config.host_etcd_client_key) + + def get_manager(self, host_config, config, etcd_client, + node_id, backend_name): + return mgr.VolumeManager(host_config, config, etcd_client, + node_id, backend_name) + + def get_meta_data_by_name(self, name): + vol = self._etcd_client.get_vol_byname(name) + if vol and 'display_name' in vol: + return vol + return None + + def volume_exists(self, name): + vol = self._etcd_client.get_vol_byname(name) + return vol is not None + + def get_path(self, volname): + return self._execute_request('get_path', volname) + + def volumedriver_remove(self, volname): + ret_val = self._execute_request('remove_volume', volname) + with self.volume_backend_lock: + LOG.debug('Removing entry for volume %s from cache' % + volname) + # This if condition is to make the test code happy + if volname in self.volume_backends_map and \ + ret_val is not None: + del self.volume_backends_map[volname] + return ret_val + + def volumedriver_unmount(self, volname, vol_mount, mount_id): + return self._execute_request('unmount_volume', + volname, + vol_mount, + mount_id) def volumedriver_create(self, volname, vol_size, vol_prov, vol_flash, @@ -87,7 +262,9 @@ def volumedriver_create(self, volname, vol_size, fs_mode, fs_owner, mount_conflict_delay, cpg, snap_cpg, current_backend, rcg_name): - return self._manager[current_backend].create_volume( + ret_val = self._execute_request_for_backend( + current_backend, + 'create_volume', volname, vol_size, vol_prov, @@ -101,47 +278,67 @@ def volumedriver_create(self, volname, vol_size, current_backend, rcg_name) - def clone_volume(self, src_vol_name, clone_name, size, cpg, snap_cpg): + return ret_val + + def clone_volume(self, src_vol_name, clone_name, size, cpg, + snap_cpg, clone_options): + # Imran: Redundant call to get_volume_backend_details + # Why is backend being passed to clone_volume when it can be + # retrieved from src_vol or use DEFAULT if 
src_vol doesn't have it backend = self.get_volume_backend_details(src_vol_name) - return self._manager[backend].clone_volume(src_vol_name, clone_name, - size, cpg, snap_cpg) + LOG.info('orchestrator clone_opts : %s' % (clone_options)) + return self._execute_request('clone_volume', src_vol_name, clone_name, + size, cpg, snap_cpg, backend, + clone_options) def create_snapshot(self, src_vol_name, schedName, snapshot_name, snapPrefix, expiration_hrs, exphrs, retention_hrs, rethrs, mount_conflict_delay, has_schedule, schedFrequency): + # Imran: Redundant call to get_volume_backend_details + # Why is backend being passed to clone_volume when it can be + # retrieved from src_vol or use DEFAULT if src_vol doesn't have it backend = self.get_volume_backend_details(src_vol_name) - return self._manager[backend].create_snapshot(src_vol_name, - schedName, - snapshot_name, - snapPrefix, - expiration_hrs, - exphrs, - retention_hrs, - rethrs, - mount_conflict_delay, - has_schedule, - schedFrequency, backend) + return self._execute_request('create_snapshot', + src_vol_name, + schedName, + snapshot_name, + snapPrefix, + expiration_hrs, + exphrs, + retention_hrs, + rethrs, + mount_conflict_delay, + has_schedule, + schedFrequency, backend) def mount_volume(self, volname, vol_mount, mount_id): - backend = self.get_volume_backend_details(volname) - return self._manager[backend].mount_volume(volname, - vol_mount, mount_id) - - def get_path(self, volname): - backend = self.get_volume_backend_details(volname) - return self._manager[backend].get_path(volname) + return self._execute_request('mount_volume', volname, + vol_mount, mount_id) def get_volume_snap_details(self, volname, snapname, qualified_name): - backend = self.get_volume_backend_details(volname) - return self._manager[backend].get_volume_snap_details(volname, - snapname, - qualified_name) + return self._execute_request('get_volume_snap_details', volname, + snapname, qualified_name) - def manage_existing(self, volname, 
existing_ref, backend): - return self._manager[backend].manage_existing(volname, - existing_ref, - backend) + def manage_existing(self, volname, existing_ref, backend, manage_opts): + ret_val = self._execute_request_for_backend( + backend, 'manage_existing', volname, existing_ref, + backend, manage_opts) + self.add_cache_entry(volname) + return ret_val def volumedriver_list(self): - return self._manager[DEFAULT_BACKEND_NAME].list_volumes() + # Use the first volume manager list volumes + volume_mgr = None + volume_mgr_info = self._manager.get('DEFAULT') + if volume_mgr_info: + volume_mgr = volume_mgr_info['mgr'] + else: + volume_mgr_info = self._manager.get('DEFAULT_BLOCK') + if volume_mgr_info: + volume_mgr = volume_mgr_info['mgr'] + + if volume_mgr: + return volume_mgr.list_volumes() + else: + return [] diff --git a/hpedockerplugin/cmd/cmd.py b/hpedockerplugin/cmd/cmd.py new file mode 100644 index 00000000..46ba2c18 --- /dev/null +++ b/hpedockerplugin/cmd/cmd.py @@ -0,0 +1,10 @@ +import abc + + +class Cmd(object): + @abc.abstractmethod + def execute(self, args): + pass + + def unexecute(self, args): + pass diff --git a/hpedockerplugin/cmd/cmd_claimavailableip.py b/hpedockerplugin/cmd/cmd_claimavailableip.py new file mode 100644 index 00000000..0084b339 --- /dev/null +++ b/hpedockerplugin/cmd/cmd_claimavailableip.py @@ -0,0 +1,105 @@ +import six +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class ClaimAvailableIPCmd(cmd.Cmd): + def __init__(self, backend, config, fp_etcd, mediator): + self._backend = backend + self._fp_etcd = fp_etcd + self._config = config + self._locked_ip = None + self._mediator = mediator + + def execute(self): + try: + return self._get_available_ip() + except (exception.IPAddressPoolExhausted, + exception.EtcdMetadataNotFound) as ex: + msg = "Claim available IP failed. 
Reason: %s" % six.text_type(ex) + raise exception.VfsCreationFailed(reason=msg) + + def unexecute(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend) + ips_in_use = backend_metadata['ips_in_use'] + if self._locked_ip in ips_in_use: + ips_in_use.remove(self._locked_ip) + + ips_locked_for_use = backend_metadata['ips_locked_for_use'] + if self._locked_ip in ips_locked_for_use: + ips_locked_for_use.remove(self._locked_ip) + + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + + def _get_available_ip(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend + ) + except exception.EtcdMetadataNotFound: + backend_metadata = { + 'ips_in_use': [], + 'ips_locked_for_use': [], + } + LOG.info("Backend metadata entry for backend %s not found." + "Creating %s..." % + (self._backend, six.text_type(backend_metadata))) + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + + # ips_in_use = backend_metadata['ips_in_use'] + all_in_use_backend_ips = self._get_all_in_use_ip_from_backend() + ips_locked_for_use = backend_metadata['ips_locked_for_use'] + total_ips_in_use = set(all_in_use_backend_ips + ips_locked_for_use) + ip_netmask_pool = self._config.hpe3par_server_ip_pool[0] + for netmask, ips in ip_netmask_pool.items(): + available_ips = ips - total_ips_in_use + if available_ips: + # Return first element from the set + available_ip = next(iter(available_ips)) + # Lock the available IP till VFS is created + ips_locked_for_use.append(available_ip) + # Save the updated meta-data + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + self._locked_ip = available_ip + return available_ip, netmask + raise exception.IPAddressPoolExhausted() + + def _get_all_in_use_ip_from_backend(self): + ips = [] + all_vfs = self._mediator.get_all_vfs() + for vfs in 
all_vfs: + all_ip_info = vfs['IPInfo'] + for ip_info in all_ip_info: + ips.append(ip_info['IPAddr']) + return ips + + def mark_ip_in_use(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + if self._locked_ip: + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend) + ips_in_use = backend_metadata['ips_in_use'] + ips_locked_for_use = \ + backend_metadata['ips_locked_for_use'] + # Move IP from locked-ip-list to in-use-list + ips_locked_for_use.remove(self._locked_ip) + ips_in_use.append(self._locked_ip) + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + except (exception.EtcdMetadataNotFound, Exception) as ex: + msg = "mark_ip_in_use failed: Metadata for backend " \ + "%s not found: Exception: %s" % (self._backend, + six.text_type(ex)) + LOG.error(msg) + raise exception.VfsCreationFailed(reason=msg) diff --git a/hpedockerplugin/cmd/cmd_createfpg.py b/hpedockerplugin/cmd/cmd_createfpg.py new file mode 100644 index 00000000..9743be5a --- /dev/null +++ b/hpedockerplugin/cmd/cmd_createfpg.py @@ -0,0 +1,145 @@ +import six +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + + +LOG = logging.getLogger(__name__) +FPG_SIZE = 16 + + +class CreateFpgCmd(cmd.Cmd): + def __init__(self, file_mgr, cpg_name, fpg_name, set_default_fpg=False): + self._file_mgr = file_mgr + self._fp_etcd = file_mgr.get_file_etcd() + self._mediator = file_mgr.get_mediator() + self._backend = file_mgr.get_backend() + self._cpg_name = cpg_name + self._fpg_name = fpg_name + self._set_default_fpg = set_default_fpg + self._backend_fpg_created = False + self._default_set = False + self._fpg_metadata_saved = False + + def execute(self): + with self._fp_etcd.get_fpg_lock(self._backend, self._cpg_name, + self._fpg_name): + LOG.info("Creating FPG %s on the backend using CPG %s" % + (self._fpg_name, self._cpg_name)) + try: + config = self._file_mgr.get_config() + fpg_size = FPG_SIZE + if 
config.hpe3par_default_fpg_size: + fpg_size = int(config.hpe3par_default_fpg_size) + LOG.info("Default FPG size overridden to %s" % fpg_size) + + self._mediator.create_fpg( + self._cpg_name, + self._fpg_name, + fpg_size + ) + self._backend_fpg_created = True + + if self._set_default_fpg: + self._add_to_default_fpg() + self._default_set = True + + fpg_metadata = { + 'fpg': self._fpg_name, + 'fpg_size': fpg_size, + } + self._fp_etcd.save_fpg_metadata(self._backend, + self._cpg_name, + self._fpg_name, + fpg_metadata) + self._fpg_metadata_saved = True + except (exception.ShareBackendException, + exception.EtcdMetadataNotFound) as ex: + msg = "Create new FPG %s failed. Msg: %s" \ + % (self._fpg_name, six.text_type(ex)) + LOG.error(msg) + raise exception.FpgCreationFailed(reason=msg) + + def unexecute(self): + if self._backend_fpg_created: + LOG.info("Deleting FPG %s from backend..." % self._fpg_name) + try: + self._mediator.delete_fpg(self._fpg_name) + except Exception as ex: + LOG.error("Undo: Failed to delete FPG %s from backend. " + "Exception: %s" % (self._fpg_name, + six.text_type(ex))) + if self._default_set: + LOG.info("Removing FPG %s as default FPG..." % self._fpg_name) + try: + self._remove_as_default_fpg() + except Exception as ex: + LOG.error("Undo: Failed to remove as default FPG " + "%s. Exception: %s" % (self._fpg_name, + six.text_type(ex))) + + if self._fpg_metadata_saved: + LOG.info("Removing metadata for FPG %s..." % self._fpg_name) + try: + self._fp_etcd.delete_fpg_metadata(self._backend, + self._cpg_name, + self._fpg_name) + except Exception as ex: + LOG.error("Undo: Delete FPG metadata failed." + "[backend: %s, cpg: %s, fpg: %s]. 
" + "Exception: %s" % (self._backend, + self._cpg_name, + self._fpg_name, + six.text_type(ex))) + + def _add_to_default_fpg(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend) + default_fpgs = backend_metadata.get('default_fpgs') + if default_fpgs: + fpg_list = default_fpgs.get(self._cpg_name) + if fpg_list: + fpg_list.append(self._fpg_name) + else: + default_fpgs[self._cpg_name] = [self._fpg_name] + else: + backend_metadata['default_fpgs'] = { + self._cpg_name: [self._fpg_name] + } + + # Save updated backend_metadata + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + except exception.EtcdMetadataNotFound as ex: + LOG.error("ERROR: Failed to set default FPG for backend %s" + % self._backend) + raise ex + except Exception as ex: + msg = "Failed to update default FPG list with FPG %s. " \ + "Exception: %s " % (self._fpg_name, six.text_type(ex)) + LOG.error(msg) + raise exception.HPEPluginEtcdException(reason=msg) + + def _remove_as_default_fpg(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend) + default_fpgs = backend_metadata['default_fpgs'] + if default_fpgs: + fpg_list = default_fpgs.get(self._cpg_name) + if fpg_list: + fpg_list.remove(self._fpg_name) + if not fpg_list: + backend_metadata.pop('default_fpgs') + + # Save updated backend_metadata + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + except exception.EtcdMetadataNotFound as ex: + LOG.error("ERROR: Failed to remove default FPG for backend %s" + % self._backend) + raise ex diff --git a/hpedockerplugin/cmd/cmd_createshare.py b/hpedockerplugin/cmd/cmd_createshare.py new file mode 100644 index 00000000..b56ad7a7 --- /dev/null +++ b/hpedockerplugin/cmd/cmd_createshare.py @@ -0,0 +1,57 @@ +import six + +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd 
+from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class CreateShareCmd(cmd.Cmd): + def __init__(self, file_mgr, share_args): + self._file_mgr = file_mgr + self._etcd = file_mgr.get_etcd() + self._fp_etcd = file_mgr.get_file_etcd() + self._mediator = file_mgr.get_mediator() + self._config = file_mgr.get_config() + self._backend = file_mgr.get_backend() + self._share_args = share_args + self._status = 'CREATING' + self._share_created_at_backend = False + self._share_created_in_etcd = False + + def unexecute(self): + share_name = self._share_args['name'] + LOG.info("cmd::unexecute: Removing share entry from ETCD: %s" % + share_name) + + # Leaving the share entry in ETCD intact so that user can inspect + # the share and look for the reason of failure. Moreover, Docker + # daemon has the entry for this share as we returned success on the + # main thread. So it would be better that the user removes this failed + # share explicitly so that Docker daemon also updates its database + if self._share_created_at_backend: + LOG.info("CreateShareCmd:Undo Deleting share from backend: %s" + % share_name) + self._mediator.delete_share(self._share_args['id']) + LOG.info("CreateShareCmd:Undo Deleting fstore from backend: %s" + % share_name) + self._mediator.delete_file_store(self._share_args['fpg'], + share_name) + + def execute(self): + share_name = self._share_args['name'] + try: + LOG.info("Creating share %s on the backend" % share_name) + share_id = self._mediator.create_share(self._share_args) + self._share_created_at_backend = True + self._share_args['id'] = share_id + self._etcd.save_share(self._share_args) + self._share_created_in_etcd = True + except Exception as ex: + msg = "Share creation failed [share_name: %s, error: %s" %\ + (share_name, six.text_type(ex)) + LOG.error(msg) + self.unexecute() + raise exception.ShareCreationFailed(msg) diff --git a/hpedockerplugin/cmd/cmd_createvfs.py b/hpedockerplugin/cmd/cmd_createvfs.py new file mode 
100644 index 00000000..bdcb59dc --- /dev/null +++ b/hpedockerplugin/cmd/cmd_createvfs.py @@ -0,0 +1,61 @@ +import six +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class CreateVfsCmd(cmd.Cmd): + def __init__(self, file_mgr, cpg_name, fpg_name, vfs_name, ip, netmask): + self._file_mgr = file_mgr + self._share_etcd = file_mgr.get_etcd() + self._fp_etcd = file_mgr.get_file_etcd() + self._mediator = file_mgr.get_mediator() + self._backend = file_mgr.get_backend() + self._cpg_name = cpg_name + self._fpg_name = fpg_name + self._vfs_name = vfs_name + self._ip = ip + self._netmask = netmask + + def execute(self): + try: + LOG.info("Creating VFS %s on the backend" % self._vfs_name) + result = self._mediator.create_vfs(self._vfs_name, + self._ip, self._netmask, + fpg=self._fpg_name) + + self._update_fpg_metadata(self._ip, self._netmask) + LOG.info("create_vfs result: %s" % result) + + except exception.ShareBackendException as ex: + msg = "Create VFS failed. 
Reason: %s" % six.text_type(ex) + LOG.error(msg) + # TODO: Add code to undo VFS creation at the backend + # self._mediator.remove_vfs(self._fpg_name, self._vfs_name) + raise exception.VfsCreationFailed(reason=msg) + + def unexecute(self): + # No need to implement this as FPG delete should delete this too + pass + + def _update_fpg_metadata(self, ip, netmask): + with self._fp_etcd.get_fpg_lock(self._backend, self._cpg_name, + self._fpg_name): + fpg_info = self._fp_etcd.get_fpg_metadata(self._backend, + self._cpg_name, + self._fpg_name) + fpg_info['vfs'] = self._vfs_name + ip_subnet_map = fpg_info.get('ips') + if ip_subnet_map: + ips = ip_subnet_map.get(netmask) + if ips: + ips.append(ip) + else: + ip_subnet_map[netmask] = [ip] + else: + fpg_info['ips'] = {netmask: [ip]} + self._fp_etcd.save_fpg_metadata(self._backend, self._cpg_name, + self._fpg_name, fpg_info) diff --git a/hpedockerplugin/cmd/cmd_deleteshare.py b/hpedockerplugin/cmd/cmd_deleteshare.py new file mode 100644 index 00000000..422bed8a --- /dev/null +++ b/hpedockerplugin/cmd/cmd_deleteshare.py @@ -0,0 +1,245 @@ +import json +import os +import six +from threading import Thread +import uuid + +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class DeleteShareCmd(cmd.Cmd): + def __init__(self, file_mgr, share_info): + self._file_mgr = file_mgr + self._etcd = file_mgr.get_etcd() + self._fp_etcd = file_mgr.get_file_etcd() + self._mediator = file_mgr.get_mediator() + self._backend = file_mgr.get_backend() + self._share_info = share_info + self._cpg_name = share_info['cpg'] + self._fpg_name = share_info['fpg'] + self._mount_id = str(uuid.uuid4()) + + def execute(self): + share_name = self._share_info['name'] + LOG.info("Deleting share %s..." 
% share_name) + # Most likely nothing got created at the backend when share is + # not in AVAILABLE state + if self._share_info['status'] == 'FAILED': + LOG.info("Share %s is in FAILED state. Removing from ETCD..." + % share_name) + ret_val, status = self._delete_share_from_etcd(share_name) + return ret_val + + elif self._share_info['status'] == 'CREATING': + msg = ("Share %s is in CREATING state. Please wait for it to be " + "in AVAILABLE or FAILED state and then attempt remove." + % share_name) + LOG.info(msg) + return json.dumps({"Err": msg}) + + try: + # A file-store of a share on which files/dirs were created cannot + # be deleted unless it is made empty. Deleting share contents... + self._del_share_contents(share_name) + self._delete_share() + except exception.ShareBackendException as ex: + return json.dumps({"Err": ex.msg}) + + ret_val, status = self._delete_share_from_etcd(share_name) + if not status: + LOG.info("Delete share %s from ETCD failed for some reason..." + "Continuing with deleting filestore/fpg..." + % share_name) + + LOG.info("Spawning thread to allow file-store, FPG delete for share " + "%s if needed..." % share_name) + thread = Thread(target=self._continue_delete_on_thread) + thread.start() + return json.dumps({u"Err": ''}) + + def _continue_delete_on_thread(self): + LOG.info("Deleting file store %s and FPG if this is the last share " + "on child thread..." % self._share_info['name']) + self._delete_file_store() + with self._fp_etcd.get_fpg_lock( + self._backend, self._cpg_name, self._fpg_name + ): + # If shares are not present on FPG after this delete, then + # delete the FPG too. 
+ if not self._mediator.shares_present_on_fpg(self._fpg_name): + if self._fpg_owned_by_docker(): + self._delete_fpg() + self._update_backend_metadata() + + def unexecute(self): + pass + + def _update_backend_metadata(self): + with self._fp_etcd.get_file_backend_lock(self._backend): + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend + ) + self._release_ip(backend_metadata) + self._remove_fpg_from_default_fpgs(backend_metadata) + # Update backend metadata + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + except Exception as ex: + msg = "WARNING: Metadata for backend %s is not " \ + "present. Exception: %s" % \ + (self._backend, six.text_type(ex)) + LOG.warning(msg) + + def _fpg_owned_by_docker(self): + LOG.info("Checking if FPG %s is owned by Docker..." % self._fpg_name) + try: + self._fp_etcd.get_fpg_metadata( + self._backend, self._cpg_name, self._fpg_name) + LOG.info("FPG %s is owned by Docker!" % self._fpg_name) + return True + except exception.EtcdMetadataNotFound: + LOG.info("FPG %s is NOT owned by Docker!" % self._fpg_name) + return False + + def _delete_share(self): + """Deletes share from the backend + + :returns: None + + :raises: :class:`~hpedockerplugin.exception.ShareBackendException + + """ + share_name = self._share_info['name'] + LOG.info("Start delete share %s..." % share_name) + if self._share_info.get('id'): + LOG.info("Deleting share %s from backend..." % share_name) + self._mediator.delete_share(self._share_info['id']) + LOG.info("Share %s deleted from backend" % share_name) + + def _del_share_contents(self, share_name): + LOG.info("Deleting contents of share %s..." % share_name) + share_mounted = False + try: + LOG.info("Mounting share %s to delete the contents..." 
+ % share_name) + resp = self._file_mgr._internal_mount_share(self._share_info) + LOG.info("Share %s mounted successfully" % share_name) + share_mounted = True + LOG.info("Resp from mount: %s" % resp) + mount_dir = resp['Mountpoint'] + cmd = 'rm -rf %s/*' % mount_dir + LOG.info("Executing command '%s' to delete share contents..." + % cmd) + ret_val = os.system(cmd) + if ret_val == 0: + LOG.info("Successfully deleted contents of share %s" + % share_name) + else: + LOG.error("Failed to delete contents of share %s. " + "Command error code: %s" % (share_name, ret_val)) + except Exception as ex: + msg = 'Failed to delete contents of share %s' % share_name + # Log error message but allow to continue with deletion of + # file-store and if required FPG. By this time the share is + # already deleted from ETCD and hence it is all the more + # important that deletion of file-store and FPG is attempted + # even after hitting this failure + LOG.error(msg) + finally: + if share_mounted: + LOG.info("Unmounting share %s after attempting to delete " + "its contents..." % share_name) + self._file_mgr._internal_unmount_share(self._share_info) + LOG.info("Unmounted share successfully %s after attempting " + "to delete its contents" % share_name) + + def _delete_file_store(self): + share_name = self._share_info['name'] + try: + LOG.info("Deleting file store %s from backend..." % share_name) + self._mediator.delete_file_store(self._fpg_name, share_name) + LOG.info("File store %s deleted from backend" % share_name) + except Exception as e: + msg = 'Failed to remove file store %(share_name)s from backend: ' \ + '%(e)s' \ + % ({'share_name': share_name, 'e': six.text_type(e)}) + LOG.error(msg) + + def _delete_share_from_etcd(self, share_name): + """Deletes share from ETCD. If delete fails, sets the share status + as FAILED + + :returns: 1. JSON dict with or without error message based on whether + operation was successful or not + 2. 
Boolean indicating if operation was successful or not + + :raises: None + + """ + try: + LOG.info("Removing share entry from ETCD: %s..." % share_name) + self._etcd.delete_share(share_name) + LOG.info("Removed share entry from ETCD: %s" % share_name) + return json.dumps({'Err': ''}), True + + except (exception.EtcdMetadataNotFound, + exception.HPEPluginEtcdException, + KeyError) as ex: + msg = "Delete share '%s' from ETCD failed: Reason: %s" \ + % (share_name, ex.msg) + LOG.error(msg) + LOG.info("Setting FAILED state for share %s..." % share_name) + self._share_info['status'] = 'FAILED' + self._share_info['detailedStatus'] = msg + try: + self._etcd.save_share(self._share_info) + except exception.HPEPluginSaveFailed as ex: + msg = "FATAL: Failed while saving share '%s' in FAILED " \ + "state to ETCD. Check if ETCD is running." % share_name + LOG.error(msg) + return json.dumps({'Err': msg}), False + + def _delete_fpg(self): + LOG.info("Deleting FPG %s from backend..." % self._fpg_name) + self._mediator.delete_fpg(self._fpg_name) + self._delete_fpg_from_etcd() + + def _delete_fpg_from_etcd(self): + LOG.info("Deleting FOG %s/%s/%s from ETCD..." % + (self._backend, self._cpg_name, self._fpg_name)) + self._fp_etcd.delete_fpg_metadata( + self._backend, self._cpg_name, self._fpg_name + ) + + def _release_ip(self, backend_metadata): + vfs_ip = self._share_info.get('vfsIPs')[0] + ip_to_release = vfs_ip[0] + LOG.info("Releasing IP %s to IP Pool..." % ip_to_release) + + # Release IP to server IP pool + ips_in_use = backend_metadata['ips_in_use'] + + # 'vfsIPs': [(IP1, Subnet1), (IP2, Subnet2)...], + ips_in_use.remove(ip_to_release) + + def _remove_fpg_from_default_fpgs(self, backend_metadata): + # Remove FPG from default FPG list + default_fpgs = backend_metadata.get('default_fpgs') + if default_fpgs: + fpg_list = default_fpgs.get(self._cpg_name) + if self._fpg_name in fpg_list: + LOG.info("Removing default FPG entry [cpg:%s," + "fpg:%s..." 
+ % (self._cpg_name, self._fpg_name)) + fpg_list.remove(self._fpg_name) + + # If last fpg got removed from the list, remove + # the CPG entry from default_fpgs + if not fpg_list: + del default_fpgs[self._cpg_name] diff --git a/hpedockerplugin/cmd/cmd_generate_fpg_vfs_names.py b/hpedockerplugin/cmd/cmd_generate_fpg_vfs_names.py new file mode 100644 index 00000000..84395830 --- /dev/null +++ b/hpedockerplugin/cmd/cmd_generate_fpg_vfs_names.py @@ -0,0 +1,56 @@ +import six +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class GenerateFpgVfsNamesCmd(cmd.Cmd): + def __init__(self, backend, cpg, fp_etcd): + self._backend = backend + self._cpg_name = cpg + self._fp_etcd = fp_etcd + + def execute(self): + return self._generate_default_fpg_vfs_names() + + def _generate_default_fpg_vfs_names(self): + LOG.info("Cmd: Generating default FPG and VFS names...") + with self._fp_etcd.get_file_backend_lock(self._backend): + try: + backend_metadata = self._fp_etcd.get_backend_metadata( + self._backend) + counter = int(backend_metadata.get('counter', 0)) + 1 + backend_metadata['counter'] = counter + new_fpg_name = "DockerFpg_%s" % counter + new_vfs_name = "DockerVfs_%s" % counter + + # Save updated backend_metadata + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + return new_fpg_name, new_vfs_name + except exception.EtcdMetadataNotFound: + new_fpg_name = "DockerFpg_0" + new_vfs_name = "DockerVfs_0" + + # Default FPG must be created at the backend first and then + # only, default_fpgs can be updated in ETCD + backend_metadata = { + 'ips_in_use': [], + 'ips_locked_for_use': [], + 'counter': 0 + } + LOG.info("Backend metadata entry for backend %s not found." + "Creating %s..." 
% + (self._backend, six.text_type(backend_metadata))) + self._fp_etcd.save_backend_metadata(self._backend, + backend_metadata) + LOG.info("Cmd: Returning FPG %s and VFS %s" % + (new_fpg_name, new_vfs_name)) + return new_fpg_name, new_vfs_name + + def unexecute(self): + # May not require implementation + pass diff --git a/hpedockerplugin/cmd/cmd_initshare.py b/hpedockerplugin/cmd/cmd_initshare.py new file mode 100644 index 00000000..3900e0ff --- /dev/null +++ b/hpedockerplugin/cmd/cmd_initshare.py @@ -0,0 +1,27 @@ +from oslo_log import log as logging +from hpedockerplugin.cmd import cmd + +LOG = logging.getLogger(__name__) + + +class InitializeShareCmd(cmd.Cmd): + def __init__(self, backend, share_args, share_etcd): + self._backend = backend + self._share_args = share_args + self._share_etcd = share_etcd + + def execute(self): + LOG.info("Initializing status for share %s..." % + self._share_args['name']) + self._share_args['status'] = 'CREATING' + self._share_etcd.save_share(self._share_args) + LOG.info("Status initialized for share %s" % + self._share_args['name']) + + # Using unexecute to mark share as FAILED + def unexecute(self): + LOG.info("Marking status of share %s as FAILED..." 
% + self._share_args['name']) + self._share_args['status'] = 'FAILED' + self._share_etcd.save_share(self._share_args) + LOG.info("Marked status of share %s as FAILED" % self._share_name) diff --git a/hpedockerplugin/cmd/cmd_setquota.py b/hpedockerplugin/cmd/cmd_setquota.py new file mode 100644 index 00000000..2c413efb --- /dev/null +++ b/hpedockerplugin/cmd/cmd_setquota.py @@ -0,0 +1,87 @@ +import six +from oslo_log import log as logging + +from hpedockerplugin.cmd import cmd +from hpedockerplugin import exception + +LOG = logging.getLogger(__name__) + + +class SetQuotaCmd(cmd.Cmd): + def __init__(self, file_mgr, cpg_name, fpg_name, vfs_name, + share_name, size): + self._file_mgr = file_mgr + self._share_etcd = file_mgr.get_etcd() + self._fp_etcd = file_mgr.get_file_etcd() + self._mediator = file_mgr.get_mediator() + self._backend = file_mgr.get_backend() + self._share_name = share_name + self._size = size + self._cpg_name = cpg_name + self._fpg_name = fpg_name + self._vfs_name = vfs_name + self._quota_id = None + + def execute(self): + # import pdb + # pdb.set_trace() + try: + fstore = self._share_name + self._quota_id = self._mediator.update_capacity_quotas( + fstore, self._size, self._fpg_name, self._vfs_name) + + share = self._update_share_metadata(self._quota_id, add=True) + + LOG.info("Updated quota metadata for share: %s" % share) + + except (exception.ShareBackendException, + exception.HPEPluginSaveFailed) as ex: + msg = "Set quota failed. 
Msg: %s" % six.text_type(ex) + LOG.error(msg) + self.unexecute() + raise exception.SetQuotaFailed(reason=msg) + + def unexecute(self): + if self._quota_id: + try: + self._mediator.remove_quota(self._quota_id) + self._update_share_metadata(self._quota_id, add=False) + except Exception: + LOG.error("ERROR: Undo quota failed for %s" % + self._share_name) + + def _update_share_metadata(self, quota_id, add=True): + share = self._share_etcd.get_share(self._share_name) + if add: + share['quota_id'] = quota_id + share['status'] = 'AVAILABLE' + elif 'quota_id' in share: + share.pop('quota_id') + share['status'] = 'FAILED' + self._share_etcd.save_share(share) + return share + +# class UnsetQuotaCmd(cmd.Cmd): +# def __init__(self, file_mgr, share_name): +# self._file_mgr = file_mgr +# self._share_etcd = file_mgr.get_etcd() +# self._mediator = file_mgr.get_mediator() +# self._share_name = share_name +# +# def execute(self): +# try: +# share = self._share_etcd.get_share(self._share_name) +# quota_id = share['quota_id'] +# self._mediator.remove_quota(quota_id) +# self._update_share_metadata(share) +# except Exception: +# LOG.error("ERROR: Unset quota failed for %s" % +# self._share_name) +# +# def unexecute(self): +# pass +# +# def _update_share_metadata(self, share): +# if 'quota_id' in share: +# share.pop('quota_id') +# self._share_etcd.save_share(share) diff --git a/hpedockerplugin/etcdutil.py b/hpedockerplugin/etcdutil.py index cd987be5..647f93d6 100644 --- a/hpedockerplugin/etcdutil.py +++ b/hpedockerplugin/etcdutil.py @@ -28,6 +28,332 @@ LOCKROOT = '/volumes-lock' RCG_LOCKROOT = '/rcg-lock' +SHAREROOT = '/shares' +FILEPERSONAROOT = '/file-persona' + +SHARE_LOCKROOT = "/share-lock" +FILE_BACKEND_LOCKROOT = "/fp-backend-lock" +FILE_CPG_LOCKROOT = "/fp-cpg-lock" +FILE_FPG_LOCKROOT = "/fp-fpg-lock" + + +class HpeEtcdClient(object): + + def __init__(self, host, port, client_cert, client_key): + self.host = host + self.port = port + + LOG.info('HpeEtcdClient datatype of host is 
%s ' % type(self.host)) + host_tuple = () + if isinstance(self.host, str): + if ',' in self.host: + host_list = [h.strip() for h in host.split(',')] + + for i in host_list: + temp_tuple = (i.split(':')[0], int(i.split(':')[1])) + host_tuple = host_tuple + (temp_tuple,) + + host_tuple = tuple(host_tuple) + + LOG.info('HpeEtcdClient host_tuple is %s, host is %s ' % + (host_tuple, self.host)) + + if client_cert is not None and client_key is not None: + if len(host_tuple) > 0: + LOG.info('HpeEtcdClient host tuple is not None') + self.client = etcd.Client(host=host_tuple, port=port, + protocol='https', + cert=(client_cert, client_key), + allow_reconnect=True) + else: + LOG.info('HpeEtcdClient host %s ' % host) + self.client = etcd.Client(host=host, port=port, + protocol='https', + cert=(client_cert, client_key)) + else: + LOG.info('HpeEtcdClient no certs') + if len(host_tuple) > 0: + LOG.info('Use http protocol') + self.client = etcd.Client(host=host_tuple, port=port, + protocol='http', + allow_reconnect=True) + else: + self.client = etcd.Client(host, port) + + def make_root(self, root): + try: + self.client.read(root) + except etcd.EtcdKeyNotFound: + self.client.write(root, None, dir=True) + except Exception as ex: + msg = (_('Could not init HpeEtcdClient: %s'), six.text_type(ex)) + LOG.error(msg) + raise exception.HPEPluginMakeEtcdRootException(reason=msg) + return + + def save_object(self, etcd_key, obj): + val = json.dumps(obj) + try: + self.client.write(etcd_key, val) + except Exception as ex: + msg = 'Failed to save object to ETCD: %s'\ + % six.text_type(ex) + LOG.error(msg) + raise exception.HPEPluginSaveFailed(obj=obj) + else: + LOG.info('Write key: %s to ETCD, value is: %s', etcd_key, val) + + def update_object(self, etcd_key, key_to_update, val): + result = self.client.read(etcd_key) + val = json.loads(result.value) + val[key_to_update] = val + val = json.dumps(val) + result.value = val + self.client.update(result) + LOG.info(_LI('Update key: %s to ETCD, value 
is: %s'), etcd_key, val) + + def delete_object(self, etcd_key): + try: + self.client.delete(etcd_key) + LOG.info(_LI('Deleted key: %s from ETCD'), etcd_key) + except etcd.EtcdKeyNotFound: + msg = "Key to delete not found ETCD: [key=%s]" % etcd_key + LOG.info(msg) + raise exception.EtcdMetadataNotFound(msg=msg) + except Exception as ex: + msg = "Unknown error encountered: %s" % six.text_type(ex) + LOG.info(msg) + raise exception.HPEPluginEtcdException(reason=msg) + + def get_object(self, etcd_key): + try: + result = self.client.read(etcd_key) + return json.loads(result.value) + except etcd.EtcdKeyNotFound: + msg = "Key not found ETCD: [key=%s]" % etcd_key + LOG.info(msg) + raise exception.EtcdMetadataNotFound(msg) + except Exception as ex: + msg = 'Failed to read key %s: Msg: %s' %\ + (etcd_key, six.text_type(ex)) + LOG.error(msg) + raise exception.EtcdUnknownException(reason=msg) + + def get_objects(self, root): + ret_list = [] + objects = self.client.read(root, recursive=True) + for obj in objects.children: + if obj.key != root: + ret_obj = json.loads(obj.value) + ret_list.append(ret_obj) + return ret_list + + def get_value(self, key): + result = self.client.read(key) + return result.value + + +# Manages File Persona metadata under /file-persona key +class HpeFilePersonaEtcdClient(object): + def __init__(self, host, port, client_cert, client_key): + self._client = HpeEtcdClient(host, port, + client_cert, client_key) + self._client.make_root(FILEPERSONAROOT) + self._root = FILEPERSONAROOT + + def create_cpg_entry(self, backend, cpg): + etcd_key = '/'.join([self._root, backend, cpg]) + try: + self._client.read(etcd_key) + except etcd.EtcdKeyNotFound: + self._client.write(etcd_key, None, dir=True) + return True + except Exception as ex: + msg = (_('Could not init HpeEtcdClient: %s'), six.text_type(ex)) + LOG.error(msg) + raise exception.HPEPluginMakeEtcdRootException(reason=msg) + return False + + def delete_cpg_entry(self, backend, cpg): + etcd_key = 
'/'.join([self._root, backend, cpg]) + self._client.delete_object(etcd_key) + + def save_fpg_metadata(self, backend, cpg, fpg, fp_metadata): + etcd_key = '/'.join([self._root, backend, cpg, fpg]) + self._client.save_object(etcd_key, fp_metadata) + + def update_fpg_metadata(self, backend, cpg, fpg, key, val): + etcd_key = '/'.join([self._root, backend, cpg, fpg]) + self._client.update_object(etcd_key, key, val) + + def delete_fpg_metadata(self, backend, cpg, fpg): + etcd_key = '/'.join([self._root, backend, cpg, fpg]) + self._client.delete_object(etcd_key) + + def get_fpg_metadata(self, backend, cpg, fpg): + etcd_key = '/'.join([self._root, backend, cpg, fpg]) + return self._client.get_object(etcd_key) + + def get_all_fpg_metadata(self, backend, cpg): + etcd_key = '%s/%s/%s' % (self._root, backend, cpg) + return self._client.get_objects(etcd_key) + + def save_backend_metadata(self, backend, metadata): + etcd_key = '%s/%s.metadata' % (self._root, backend) + self._client.save_object(etcd_key, metadata) + + def update_backend_metadata(self, backend, key, val): + etcd_key = '%s/%s.metadata' % (self._root, backend) + self._client.update_object(etcd_key, key, val) + + def delete_backend_metadata(self, backend): + etcd_key = '%s/%s.metadata' % (self._root, backend) + self._client.delete_object(etcd_key) + + def get_backend_metadata(self, backend): + etcd_key = '%s/%s.metadata' % (self._root, backend) + return self._client.get_object(etcd_key) + + def get_lock(self, lock_type, name=None): + lockroot_map = { + 'FP_BACKEND': FILE_BACKEND_LOCKROOT, + 'FP_FPG': FILE_FPG_LOCKROOT + } + lock_root = lockroot_map.get(lock_type) + if lock_root: + return EtcdLock(lock_root + '/', self._client.client, name) + raise exception.EtcdInvalidLockType(type=lock_type) + + def get_file_backend_lock(self, backend): + return EtcdLock(FILE_BACKEND_LOCKROOT + '/', self._client.client, + name=backend) + + def get_cpg_lock(self, backend, cpg): + lock_key = '/'.join([backend, cpg]) + return 
EtcdLock(FILE_CPG_LOCKROOT + '/', self._client.client, + name=lock_key) + + def get_fpg_lock(self, backend, cpg, fpg): + lock_key = '/'.join([backend, cpg, fpg]) + return EtcdLock(FILE_FPG_LOCKROOT + '/', self._client.client, + name=lock_key) + + +class HpeShareEtcdClient(object): + + def __init__(self, host, port, client_cert, client_key): + self._client = HpeEtcdClient(host, port, + client_cert, client_key) + self._client.make_root(SHAREROOT) + self._root = SHAREROOT + '/' + + self._client.make_root(BACKENDROOT) + self.backendroot = BACKENDROOT + '/' + + def save_share(self, share): + etcd_key = self._root + share['name'] + self._client.save_object(etcd_key, share) + + def update_share(self, name, key, val): + etcd_key = self._root + name + self._client.update_object(etcd_key, key, val) + + def delete_share(self, share_name): + etcd_key = self._root + share_name + self._client.delete_object(etcd_key) + + def get_share(self, name): + etcd_key = self._root + name + return self._client.get_object(etcd_key) + + def get_all_shares(self): + return self._client.get_objects(SHAREROOT) + + def get_lock(self, lock_type, name=None): + return EtcdLock(SHARE_LOCKROOT + '/', self._client.client, name=name) + + def get_backend_key(self, backend): + passphrase = self.backendroot + backend + return self._client.get_value(passphrase) + + +# TODO: Eventually this will take over and EtcdUtil will be phased out +# class HpeVolumeEtcdClient(object): +# +# def __init__(self, host, port, client_cert, client_key): +# self._client = HpeEtcdClient(host, port, +# client_cert, client_key) +# self._client.make_root(VOLUMEROOT) +# self._root = VOLUMEROOT + '/' +# +# self._client.make_root(BACKENDROOT) +# self.backendroot = BACKENDROOT + '/' +# +# def save_vol(self, vol): +# etcd_key = self._root + vol['id'] +# self._client.save_object(etcd_key, vol) +# +# def update_vol(self, volid, key, val): +# etcd_key = self._root + volid +# self._client.update_object(etcd_key, key, val) +# +# def 
delete_vol(self, vol): +# etcd_key = self._root + vol['id'] +# self._client.delete_object(etcd_key) +# +# def get_vol_byname(self, volname): +# volumes = self._client.get_objects(self._root) +# LOG.info(_LI('Get volbyname: volname is %s'), volname) +# +# for child in volumes.children: +# if child.key != VOLUMEROOT: +# volmember = json.loads(child.value) +# vol = volmember['display_name'] +# if vol.startswith(volname, 0, len(volname)): +# if volmember['display_name'] == volname: +# return volmember +# elif volmember['name'] == volname: +# return volmember +# return None +# +# def get_vol_by_id(self, volid): +# etcd_key = self._root + volid +# return self._client.get_object(etcd_key) +# +# def get_all_vols(self): +# return self._client.get_objects(VOLUMEROOT) +# +# def get_vol_path_info(self, volname): +# vol = self.get_vol_byname(volname) +# if vol: +# if 'path_info' in vol and vol['path_info'] is not None: +# path_info = json.loads(vol['path_info']) +# return path_info +# if 'mount_path_dict' in vol: +# return vol['mount_path_dict'] +# return None +# +# def get_path_info_from_vol(self, vol): +# if vol: +# if 'path_info' in vol and vol['path_info'] is not None: +# return json.loads(vol['path_info']) +# if 'share_path_info' in vol: +# return vol['share_path_info'] +# return None +# +# def get_lock(self, lock_type): +# # By default this is volume lock-root +# lockroot_map = {'VOL': LOCKROOT, +# 'RCG': RCG_LOCKROOT} +# lock_root = lockroot_map.get(lock_type) +# if lock_root: +# return EtcdLock(lock_root + '/', self._client.client) +# raise exception.EtcdInvalidLockType(type=lock_type) +# +# def get_backend_key(self, backend): +# passphrase = self.backendroot + backend +# return self._client.get_value(passphrase) + class EtcdUtil(object): @@ -120,20 +446,12 @@ def delete_vol(self, vol): self.client.delete(volkey) LOG.info(_LI('Deleted key: %s from etcd'), volkey) - def _get_vol_byuuid(self, voluuid): - volkey = self.volumeroot + voluuid - result = 
self.client.read(volkey) - - volval = json.loads(result.value) - LOG.info(_LI('Read key: %s from etcd, result is: %s'), volkey, volval) - return volval - - def get_lock(self, lock_type): + def get_lock(self, lock_type, lock_name): # By default this is volume lock-root lock_root = LOCKROOT if lock_type == 'RCG': lock_root = RCG_LOCKROOT - return EtcdLock(lock_root + '/', self.client) + return EtcdLock(lock_root + '/', self.client, name=lock_name) def get_vol_byname(self, volname): volumes = self.client.read(self.volumeroot, recursive=True) @@ -156,8 +474,13 @@ def get_vol_by_id(self, volid): return json.loads(result.value) def get_all_vols(self): + ret_vol_list = [] volumes = self.client.read(self.volumeroot, recursive=True) - return volumes + for volinfo in volumes.children: + if volinfo.key != VOLUMEROOT: + vol = json.loads(volinfo.value) + ret_vol_list.append(vol) + return ret_vol_list def get_vol_path_info(self, volname): vol = self.get_vol_byname(volname) @@ -169,9 +492,8 @@ def get_vol_path_info(self, volname): def get_path_info_from_vol(self, vol): if vol: - info = json.loads(vol) - if 'path_info' in info and info['path_info'] is not None: - return json.loads(info['path_info']) + if 'path_info' in vol and vol['path_info'] is not None: + return json.loads(vol['path_info']) return None def get_backend_key(self, backend): @@ -181,27 +503,36 @@ def get_backend_key(self, backend): class EtcdLock(object): - def __init__(self, lock_root, client): + def __init__(self, lock_root, client, name): self._lock_root = lock_root self._client = client + self._name = name + self._lock = etcd.Lock(client, name) - def try_lock_name(self, name): - try: - LOG.debug("Try locking name %s", name) - self._client.write(self._lock_root + name, name, - prevExist=False) - LOG.debug("Name is locked : %s", name) - except Exception as ex: - msg = 'Name: %(name)s is already locked' % {'name': name} - LOG.exception(msg) - raise exception.HPEPluginLockFailed(obj=name) + def __enter__(self): + 
if self._name: + self.try_lock_name() - def try_unlock_name(self, name): - try: - LOG.debug("Try unlocking name %s", name) - self._client.delete(self._lock_root + name) - LOG.debug("Name is unlocked : %s", name) - except Exception as ex: - msg = 'Name: %(name)s unlock failed' % {'name': name} - LOG.exception(msg) - raise exception.HPEPluginUnlockFailed(obj=name) + def __exit__(self, exc_type, exc_val, exc_tb): + if self._name: + self.try_unlock_name() + + def try_lock_name(self): + LOG.debug("Try locking name %s", self._name) + self._lock.acquire(lock_ttl=300, timeout=300) + if self._lock.is_acquired: + LOG.debug("Name is locked : %s", self._name) + else: + msg = 'Failed to acquire lock: %(name)s' % {'name': self._name} + LOG.error(msg) + raise exception.HPEPluginLockFailed(obj=self._name) + + def try_unlock_name(self): + LOG.debug("Try unlocking name %s", self._name) + self._lock.release() + if not self._lock.is_acquired: + LOG.debug("Name is unlocked : %s", self._name) + else: + msg = 'Failed to release lock: %(name)s' % {'name': self._name} + LOG.error(msg) + raise exception.HPEPluginUnlockFailed(obj=self._name) diff --git a/hpedockerplugin/exception.py b/hpedockerplugin/exception.py index 2021014f..18027081 100644 --- a/hpedockerplugin/exception.py +++ b/hpedockerplugin/exception.py @@ -175,7 +175,7 @@ class HPEPluginStartPluginException(PluginException): class HPEPluginNotInitializedException(PluginException): - message = _("HPE Docker Volume plugin not ready.") + message = _("HPE Docker Volume plugin not ready: %(reason)s") class HPEPluginCreateException(PluginException): @@ -190,6 +190,11 @@ class HPEPluginMountException(PluginException): message = _("HPE Docker Volume Plugin Mount Failed: %(reason)s") +class HPEPluginCheckMountException(PluginException): + message = _("HPE Docker Volume Plugin Check if Mount already exists" + " on host Failed: %(reason)s") + + class HPEPluginUMountException(PluginException): message = _("HPE Docker Volume Plugin Unmount 
Failed: %(reason)s") @@ -236,6 +241,10 @@ class HPEPluginLockFailed(HPEPluginEtcdException): message = _("ETCD lock failed: %(obj)s") +class HPEPluginReadBackendFailed(HPEPluginEtcdException): + message = _("ETCD read for backend failed for vol: %(volname)s") + + class HPEPluginActiveDriverEntryNotFound(HPEPluginEtcdException): message = _("ETCD active driver info not found: %(key)s") @@ -245,7 +254,7 @@ class HPEPluginUnlockFailed(HPEPluginEtcdException): class HPEDriverException(PluginException): - pass + message = _("Driver exception: %(msg)") class HPEDriverInvalidInput(HPEDriverException): @@ -270,7 +279,8 @@ class HPEDriverCreateVolumeWithQosFailed(HPEDriverException): class HPEDriverGetQosFromVvSetFailed(HPEDriverException): - message = "" + message = _("Failed to retrieve QOS from VV-Set %(vvset_name)s:" + " %(reason)s") class HPEDriverSetFlashCacheOnVvsFailed(HPEDriverException): @@ -333,3 +343,101 @@ class InvalidRcgRoleForDeleteVolume(PluginException): class DeleteReplicatedVolumeFailed(PluginException): message = _("Delete Replication Volume Failed: %(reason)s") + + +class RcgStateInTransitionException(PluginException): + message = _("Remote copy group state is in transition: %(reason)s") + + +class HPEDriverNoQosOrFlashCacheSetForVolume(PluginException): + message = _("Volume in VVS without being associated with QOS or " + "flash-cache: %(reason)s") + + +class EtcdMetadataNotFound(PluginException): + message = _("ETCD metadata not found: %(msg)s") + + +class ShareBackendException(PluginException): + message = _("Share backend exception: %(msg)s") + + +class EtcdFpgEntryForCpgNotFound(PluginException): + message = _("FPG %(fpg)s does not exist under the specified/default " + "CPG %(cpg)s") + + +class FpgNotFound(PluginException): + message = _("FPG %(fpg)s does not exist") + + +class EtcdCpgEntryNotFound(PluginException): + message = _("CPG %(cpg)s does not exist %(cpg)s") + + +class CmdExecutionError(PluginException): + message = _("Failed to execute 
command. Cause: %(msg)s") + + +class EtcdInvalidLockType(PluginException): + message = _("Invalid lock type %(type)s specified") + + +class FileIPPoolExhausted(PluginException): + message = _("IP pool exhausted for %(backend)s") + + +class EtcdMaxSharesPerFpgLimitException(PluginException): + message = _("Max share limit reached for FPG %(fpg_name)s") + + +class EtcdDefaultFpgNotAvailable(PluginException): + message = _("No default FPG is available under CPG %(cpg)s") + + +class EtcdDefaultFpgNotPresent(PluginException): + message = _("No default FPG is not present for CPG %(cpg)s") + + +class EtcdBackendMetadataDoesNotExist(PluginException): + message = _("Backend metadata doesn't exist for backend: %(backend)s") + + +class EtcdUnknownException(PluginException): + message = _("Unknown exception occured: %(reason)s") + + +class IPAddressPoolExhausted(PluginException): + message = _("IP address pool exhausted") + + +class VfsCreationFailed(PluginException): + message = _("VFS creation failed: %(reason)s") + + +class ShareCreationFailed(PluginException): + message = _("Share creation failed: %(reason)s") + + +class FpgCreationFailed(PluginException): + message = _("FPG creation failed: %(reason)s") + + +class FpgAlreadyExists(PluginException): + message = _("FPG already exists: %(reason)s") + + +class UserGroupNotFoundOn3PAR(PluginException): + message = _("fsusergroup or fsuser doesn't exist on 3PAR: %(reason)s") + + +class SetQuotaFailed(PluginException): + message = _("Set quota failed: %(reason)s") + + +class HPEDriverNonExistentCpg(HPEDriverException): + message = "CPG %(cpg)s does not exist" + + +class FpgCapacityInsufficient(PluginException): + message = _("FPG %(fpg)s does not have enough capacity") diff --git a/hpedockerplugin/file_backend_orchestrator.py b/hpedockerplugin/file_backend_orchestrator.py new file mode 100644 index 00000000..e98e3fea --- /dev/null +++ b/hpedockerplugin/file_backend_orchestrator.py @@ -0,0 +1,143 @@ +import json +from oslo_log 
import log as logging + +from hpedockerplugin.backend_orchestrator import Orchestrator +import hpedockerplugin.etcdutil as util +import hpedockerplugin.file_manager as fmgr + +LOG = logging.getLogger(__name__) + + +class FileBackendOrchestrator(Orchestrator): + + fp_etcd_client = None + + def __init__(self, host_config, backend_configs, def_backend_name): + super(FileBackendOrchestrator, self).__init__( + host_config, backend_configs, def_backend_name) + + @staticmethod + def _get_fp_etcd_client(host_config): + return util.HpeFilePersonaEtcdClient( + host_config.host_etcd_ip_address, + host_config.host_etcd_port_number, + host_config.host_etcd_client_cert, + host_config.host_etcd_client_key + ) + + def _initialize_orchestrator(self, host_config): + FileBackendOrchestrator.fp_etcd_client = self._get_fp_etcd_client( + host_config + ) + + # Implementation of abstract function from base class + def get_manager(self, host_config, config, etcd_client, + node_id, backend_name): + LOG.info("Getting file manager...") + return fmgr.FileManager(host_config, config, etcd_client, + FileBackendOrchestrator.fp_etcd_client, + node_id, backend_name) + + # Implementation of abstract function from base class + def _get_etcd_client(self, host_config): + # Reusing volume code for ETCD client + return util.HpeShareEtcdClient( + host_config.host_etcd_ip_address, + host_config.host_etcd_port_number, + host_config.host_etcd_client_cert, + host_config.host_etcd_client_key) + + def get_meta_data_by_name(self, name): + LOG.info("Fetching share details from ETCD: %s" % name) + share = self._etcd_client.get_share(name) + if share: + LOG.info("Returning share details: %s" % share) + return share + LOG.info("Share details not found in ETCD: %s" % name) + return None + + def share_exists(self, name): + try: + self._etcd_client.get_share(name) + except Exception: + return False + else: + return True + + def create_share(self, **kwargs): + name = kwargs['name'] + # Removing backend from share 
dictionary + # This needs to be put back when share is + # saved to the ETCD store + backend = kwargs.get('backend') + return self._execute_request_for_backend( + backend, 'create_share', name, **kwargs) + + def create_share_help(self, **kwargs): + LOG.info("Working on share help content generation...") + create_help_path = "./config/create_share_help.txt" + create_help_file = open(create_help_path, "r") + create_help_content = create_help_file.read() + create_help_file.close() + LOG.info(create_help_content) + return json.dumps({u"Err": create_help_content}) + + def get_backends_status(self, **kwargs): + LOG.info("Getting backend status...") + line = "=" * 54 + spaces = ' ' * 42 + resp = "\n%s\nNAME%sSTATUS\n%s\n" % (line, spaces, line) + + printable_len = 45 + for k, v in self._manager.items(): + backend_state = v['backend_state'] + padding = (printable_len - len(k)) * ' ' + resp += "%s%s %s\n" % (k, padding, backend_state) + return json.dumps({u'Err': resp}) + + def remove_object(self, obj): + share_name = obj['name'] + return self._execute_request('remove_share', share_name, obj) + + def mount_object(self, obj, mount_id): + share_name = obj['name'] + return self._execute_request('mount_share', share_name, + obj, mount_id) + + def unmount_object(self, obj, mount_id): + share_name = obj['name'] + return self._execute_request('unmount_share', share_name, + obj, mount_id) + + def get_object_details(self, obj): + share_name = obj['name'] + return self._execute_request('get_share_details', share_name, obj) + + def list_objects(self): + file_mgr = None + file_mgr_info = self._manager.get('DEFAULT') + if file_mgr_info: + file_mgr = file_mgr_info['mgr'] + else: + file_mgr_info = self._manager.get('DEFAULT_FILE') + if file_mgr_info: + file_mgr = file_mgr_info['mgr'] + + share_list = [] + db_shares = self._etcd_client.get_all_shares() + if file_mgr: + for db_share in db_shares: + share_info = file_mgr.get_share_info_for_listing( + db_share['name'], + db_share + ) + 
share_list.append(share_info) + return share_list + + def get_path(self, obj): + mount_dir = '' + if 'path_info' in obj: + share_name = obj['name'] + mount_dir = self._execute_request('get_mount_dir', share_name) + response = json.dumps({u"Err": '', u"Mountpoint": mount_dir}) + return response diff --git a/hpedockerplugin/file_manager.py b/hpedockerplugin/file_manager.py new file mode 100644 index 00000000..bd6899ad --- /dev/null +++ b/hpedockerplugin/file_manager.py @@ -0,0 +1,917 @@ +import copy +import json +import sh +import six +import os +from threading import Thread + +from oslo_log import log as logging +from oslo_utils import netutils + +from hpedockerplugin.cmd.cmd_claimavailableip import ClaimAvailableIPCmd +from hpedockerplugin.cmd.cmd_createfpg import CreateFpgCmd +from hpedockerplugin.cmd.cmd_createvfs import CreateVfsCmd + +from hpedockerplugin.cmd.cmd_initshare import InitializeShareCmd +from hpedockerplugin.cmd.cmd_createshare import CreateShareCmd +from hpedockerplugin.cmd import cmd_generate_fpg_vfs_names +from hpedockerplugin.cmd import cmd_setquota +from hpedockerplugin.cmd import cmd_deleteshare + +import hpedockerplugin.exception as exception +import hpedockerplugin.fileutil as fileutil +import hpedockerplugin.hpe.array_connection_params as acp +from hpedockerplugin.i18n import _ +from hpedockerplugin.hpe import hpe_3par_mediator +from hpedockerplugin import synchronization +from hpedockerplugin.hpe import utils + +LOG = logging.getLogger(__name__) + + +class FileManager(object): + def __init__(self, host_config, hpepluginconfig, etcd_util, + fp_etcd_client, node_id, backend_name): + self._host_config = host_config + self._hpepluginconfig = hpepluginconfig + + self._etcd = etcd_util + self._fp_etcd_client = fp_etcd_client + self._node_id = node_id + self._backend = backend_name + + self._initialize_configuration() + + self._pwd_decryptor = utils.PasswordDecryptor(backend_name, + self._etcd) + 
self._pwd_decryptor.decrypt_password(self.src_bkend_config) + + # TODO: When multiple backends come into picture, consider + # lazy initialization of individual driver + try: + LOG.info("Initializing 3PAR driver...") + self._primary_driver = self._initialize_driver( + host_config, self.src_bkend_config) + + self._hpeplugin_driver = self._primary_driver + LOG.info("Initialized 3PAR driver!") + except Exception as ex: + msg = "Failed to initialize 3PAR driver for array: %s!" \ + "Exception: %s"\ + % (self.src_bkend_config.hpe3par_api_url, + six.text_type(ex)) + LOG.info(msg) + raise exception.HPEPluginStartPluginException( + reason=msg) + + def get_backend(self): + return self._backend + + def get_mediator(self): + return self._hpeplugin_driver + + def get_file_etcd(self): + return self._fp_etcd_client + + def get_etcd(self): + return self._etcd + + def get_config(self): + return self.src_bkend_config + + def _initialize_configuration(self): + self.src_bkend_config = self._get_src_bkend_config() + def_fpg_size = self.src_bkend_config.hpe3par_default_fpg_size + if def_fpg_size: + if def_fpg_size < 1 or def_fpg_size > 64: + msg = "Configured hpe3par_default_fpg_size MUST be in the " \ + "range 1 and 64. 
Specified value is %s" % def_fpg_size + LOG.error(msg) + raise exception.InvalidInput(msg) + + def _get_src_bkend_config(self): + LOG.info("Getting source backend configuration...") + hpeconf = self._hpepluginconfig + config = acp.ArrayConnectionParams() + for key in hpeconf.keys(): + value = getattr(hpeconf, key) + config.__setattr__(key, value) + + LOG.info("Got source backend configuration!") + return config + + def _initialize_driver(self, host_config, src_config): + + mediator = self._create_mediator(host_config, src_config) + try: + mediator.do_setup(timeout=30) + # self.check_for_setup_error() + return mediator + except Exception as ex: + msg = (_('hpeplugin_driver do_setup failed, error is: %s'), + six.text_type(ex)) + LOG.error(msg) + raise exception.HPEPluginNotInitializedException(reason=msg) + + @staticmethod + def _create_mediator(host_config, config): + return hpe_3par_mediator.HPE3ParMediator(host_config, config) + + def create_share(self, share_name, **args): + share_args = copy.deepcopy(args) + # ====== TODO: Uncomment later =============== + thread = Thread(target=self._create_share, + args=(share_name, share_args)) + + # Process share creation on child thread + thread.start() + # ====== TODO: Uncomment later =============== + + # ======= TODO: Remove this later ======== + # import pdb + # pdb.set_trace() + # self._create_share(share_name, share_args) + # ======= TODO: Remove this later ======== + + # Return success + return json.dumps({"Err": ""}) + + def _get_existing_fpg(self, share_args): + cpg_name = share_args['cpg'] + fpg_name = share_args['fpg'] + + def _check_if_space_sufficient(backend_fpg=None): + LOG.info("Checking if FPG %s has enough capcity..." % fpg_name) + available_capacity = self._get_fpg_available_capacity(fpg_name, + backend_fpg) + share_size_in_gib = share_args['size'] / 1024 + if available_capacity < share_size_in_gib: + LOG.info("FPG %s doesn't have enough capcity..." 
% fpg_name) + raise exception.FpgCapacityInsufficient(fpg=fpg_name) + LOG.info("FPG %s has enough capacity" % fpg_name) + + try: + fpg_info = self._fp_etcd_client.get_fpg_metadata( + self._backend, + cpg_name, fpg_name + ) + _check_if_space_sufficient() + except exception.EtcdMetadataNotFound: + LOG.info("Specified FPG %s not found in ETCD. Checking " + "if this is a legacy FPG..." % fpg_name) + # Assume it's a legacy FPG, try to get details + leg_fpg = self._hpeplugin_driver.get_fpg(fpg_name) + LOG.info("FPG %s is a legacy FPG" % fpg_name) + + _check_if_space_sufficient(leg_fpg) + + # CPG passed can be different than actual CPG + # used for creating legacy FPG. Override default + # or supplied CPG + if cpg_name != leg_fpg['cpg']: + msg = ("ERROR: Invalid CPG %s specified as an option or " + "configured in hpe.conf that doesn't match the parent " + "CPG %s of the specified legacy FPG %s. Please " + "specify CPG as '-o cpg=%s'" % + (cpg_name, leg_fpg['cpg'], fpg_name, leg_fpg['cpg'])) + LOG.error(msg) + raise exception.InvalidInput(msg) + + # Get backend VFS information + vfs_info = self._hpeplugin_driver.get_vfs(fpg_name) + vfs_name = vfs_info['name'] + ip_info = vfs_info['IPInfo'][0] + netmask = ip_info['netmask'] + ip = ip_info['IPAddr'] + + fpg_info = { + 'ips': {netmask: [ip]}, + 'fpg': fpg_name, + 'vfs': vfs_name, + } + + fpg_data = {'fpg': fpg_info} + yield fpg_data + + if fpg_data['result'] != 'DONE': + LOG.error("Share could not be created on FPG %s" % fpg_name) + raise exception.ShareCreationFailed(share_args['cpg']) + + def _get_fpg_available_capacity(self, fpg_name, backend_fpg=None): + if not backend_fpg: + LOG.info("Getting FPG %s from backend..." % fpg_name) + backend_fpg = self._hpeplugin_driver.get_fpg(fpg_name) + LOG.info("%s" % six.text_type(backend_fpg)) + LOG.info("Getting all quotas for FPG %s..." 
% fpg_name) + quotas = self._hpeplugin_driver.get_quotas_for_fpg(fpg_name) + used_capacity_GiB = 0 + for quota in quotas['members']: + used_capacity_GiB += (quota['hardBlockMiB'] / 1024) + fpg_total_capacity_GiB = backend_fpg['availCapacityGiB'] + LOG.info("Total capacity of FPG %s: %s GiB" % + (fpg_name, fpg_total_capacity_GiB)) + LOG.info("Capacity used on FPG %s is %s GiB" % + (fpg_name, used_capacity_GiB)) + fpg_avail_capacity = fpg_total_capacity_GiB - used_capacity_GiB + LOG.info("Available capacity on FPG %s is %s GiB" % + (fpg_name, fpg_avail_capacity)) + return fpg_avail_capacity + + # If default FPG is full, it raises exception + # EtcdMaxSharesPerFpgLimitException + def _get_default_available_fpg(self, share_args): + LOG.info("Getting default available FPG...") + processing_done = False + for fpg_name in self._get_current_default_fpg_name(share_args): + try: + fpg_available_capacity = self._get_fpg_available_capacity( + fpg_name + ) + LOG.info("FPG available capacity in GiB: %s" % + fpg_available_capacity) + # Share size in MiB - convert it to GiB + share_size_in_gib = share_args['size'] / 1024 + + # Yield only those default FPGs that have enough available + # capacity to create the requested share + if fpg_available_capacity >= share_size_in_gib: + LOG.info("Found default FPG with enough available " + "capacity %s GiB to create share of size %s GiB" + % (fpg_available_capacity, share_size_in_gib)) + # Get backend VFS information + vfs_info = self._hpeplugin_driver.get_vfs(fpg_name) + vfs_name = vfs_info['name'] + ip_info = vfs_info['IPInfo'][0] + netmask = ip_info['netmask'] + ip = ip_info['IPAddr'] + + fpg_info = { + 'ips': {netmask: [ip]}, + 'fpg': fpg_name, + 'vfs': vfs_name, + } + fpg_data = {'fpg': fpg_info} + yield fpg_data + + if fpg_data['result'] == 'DONE': + LOG.info("Share creation done using FPG %s" % + fpg_name) + processing_done = True + break + else: + LOG.info("Share could not be created on FPG %s. 
" + "Finding another default FPG with enough " + "capacity to create share of size %s" + % (fpg_name, share_size_in_gib)) + continue + + except exception.FpgNotFound: + LOG.warning("FPG %s present in ETCD but not found on backend. " + "Looking for next FPG" % fpg_name) + continue + + # Default FPGs were there but none of them could satisfy the + # requirement of creating share. New FPG must be created + # hence raising exception to execute FPG creation flow + if not processing_done: + raise exception.EtcdDefaultFpgNotPresent(share_args['cpg']) + + # TODO:Imran: Backend metadata needs modification + # Instead of one FPG, we need FPG listz + # Backend metadata + # {'default_fpgs': { + # cpg1: [fpg1, fpg2], + # cpg2: [fpg3] + # } + def _get_current_default_fpg_name(self, share_args): + cpg_name = share_args['cpg'] + try: + LOG.info("Fetching metadata for backend %s..." % self._backend) + backend_metadata = self._fp_etcd_client.get_backend_metadata( + self._backend) + LOG.info("Backend metadata: %s" % backend_metadata) + default_fpgs = backend_metadata.get('default_fpgs') + if default_fpgs: + LOG.info("Checking if default FPG present for CPG %s..." 
% + cpg_name) + fpg_list = default_fpgs.get(cpg_name, []) + for default_fpg in fpg_list: + LOG.info("Default FPG %s found for CPG %s" % + (default_fpg, cpg_name)) + yield default_fpg + else: + LOG.info("Default FPG not found under backend %s for CPG %s" + % (self._backend, cpg_name)) + raise exception.EtcdDefaultFpgNotPresent(cpg=cpg_name) + except exception.EtcdMetadataNotFound: + LOG.info("Metadata not found for backend %s" % self._backend) + raise exception.EtcdDefaultFpgNotPresent(cpg=cpg_name) + + def _unexecute(self, undo_cmds): + for undo_cmd in reversed(undo_cmds): + undo_cmd.unexecute() + + def _generate_default_fpg_vfs_names(self, share_args): + # Default share creation - generate default names + cmd = cmd_generate_fpg_vfs_names.GenerateFpgVfsNamesCmd( + self._backend, share_args['cpg'], + self._fp_etcd_client + ) + LOG.info("_generate_default_fpg_vfs_names: Generating default " + "FPG VFS names") + fpg_name, vfs_name = cmd.execute() + LOG.info("_generate_default_fpg_vfs_names: Generated: %s, %s" + % (fpg_name, vfs_name)) + return fpg_name, vfs_name + + @staticmethod + def _vfs_name_from_fpg_name(share_args): + # Generate VFS name using specified FPG with "-o fpg" option + fpg_name = share_args['fpg'] + vfs_name = fpg_name + '_vfs' + LOG.info("Returning FPG and VFS names: %s, %s" % (fpg_name, vfs_name)) + return fpg_name, vfs_name + + def _create_fpg(self, share_args, undo_cmds): + LOG.info("Generating FPG and VFS names...") + cpg = share_args['cpg'] + fpg_name, vfs_name = self._vfs_name_from_fpg_name(share_args) + LOG.info("Names generated: FPG=%s, VFS=%s" % + (fpg_name, vfs_name)) + LOG.info("Creating FPG %s using CPG %s" % (fpg_name, cpg)) + create_fpg_cmd = CreateFpgCmd(self, cpg, fpg_name, False) + create_fpg_cmd.execute() + LOG.info("FPG %s created successfully using CPG %s" % + (fpg_name, cpg)) + undo_cmds.append(create_fpg_cmd) + return fpg_name, vfs_name + + def _create_default_fpg(self, share_args, undo_cmds): + LOG.info("Generating FPG and VFS 
names...") + cpg = share_args['cpg'] + while True: + fpg_name, vfs_name = self._generate_default_fpg_vfs_names( + share_args + ) + LOG.info("Names generated: FPG=%s, VFS=%s" % + (fpg_name, vfs_name)) + LOG.info("Creating FPG %s using CPG %s" % (fpg_name, cpg)) + try: + create_fpg_cmd = CreateFpgCmd(self, cpg, fpg_name, True) + create_fpg_cmd.execute() + LOG.info("FPG %s created successfully using CPG %s" % + (fpg_name, cpg)) + undo_cmds.append(create_fpg_cmd) + return fpg_name, vfs_name + # Only if duplicate FPG exists, we need to retry FPG creation + except exception.FpgAlreadyExists as ex: + LOG.info("FPG %s could not be created. Error: %s" % + (fpg_name, six.text_type(ex))) + LOG.info("Retrying with new FPG name...") + continue + # Any exception other than duplicate FPG, raise it and fail + # share creation process. We could have removed this except + # block altogether. Keeping it so that the intent is known + # explicitly to any reader of the code + except Exception as ex: + raise ex + + def _create_share_on_fpg(self, share_args, fpg_getter, + fpg_creator, undo_cmds): + share_name = share_args['name'] + LOG.info("Creating share %s..." % share_name) + cpg = share_args['cpg'] + + def __create_share_and_quota(): + LOG.info("Creating share %s..." % share_name) + create_share_cmd = CreateShareCmd( + self, + share_args + ) + create_share_cmd.execute() + LOG.info("Share created successfully %s" % share_name) + undo_cmds.append(create_share_cmd) + + LOG.info("Setting quota for share %s..." 
% share_name) + set_quota_cmd = cmd_setquota.SetQuotaCmd( + self, + share_args['cpg'], + share_args['fpg'], + share_args['vfs'], + share_args['name'], + share_args['size'] + ) + set_quota_cmd.execute() + LOG.info("Quota set for share successfully %s" % share_name) + undo_cmds.append(set_quota_cmd) + + with self._fp_etcd_client.get_cpg_lock(self._backend, cpg): + try: + init_share_cmd = InitializeShareCmd( + self._backend, share_args, self._etcd + ) + init_share_cmd.execute() + + fpg_gen = fpg_getter(share_args) + while True: + try: + fpg_data = next(fpg_gen) + fpg_info = fpg_data['fpg'] + share_args['fpg'] = fpg_info['fpg'] + share_args['vfs'] = fpg_info['vfs'] + + # Only one IP per FPG is supported at the moment + # Given that, list can be dropped + subnet_ips_map = fpg_info['ips'] + subnet, ips = next(iter(subnet_ips_map.items())) + share_args['vfsIPs'] = [(ips[0], subnet)] + + __create_share_and_quota() + + # Set result to success so that FPG generator can stop + fpg_data['result'] = 'DONE' + break + + except exception.SetQuotaFailed: + fpg_data['result'] = 'IN_PROCESS' + self._unexecute(undo_cmds) + undo_cmds.clear() + + except StopIteration: + # Let the generator take the call whether it wants to + # report failure or wants to create new default FPG + # for this share + fpg_data['result'] = 'FAILED' + undo_cmds.clear() + break + except (exception.EtcdMaxSharesPerFpgLimitException, + exception.EtcdMetadataNotFound, + exception.EtcdDefaultFpgNotPresent, + exception.FpgNotFound): + LOG.info("FPG not found under backend %s for CPG %s" + % (self._backend, cpg)) + # In all the above cases, default FPG is not present + # and we need to create a new one + try: + # Generate FPG and VFS names. This will also initialize + # backend meta-data in case it doesn't exist + fpg_name, vfs_name = fpg_creator(share_args, undo_cmds) + share_args['fpg'] = fpg_name + share_args['vfs'] = vfs_name + + LOG.info("Trying to claim free IP from IP pool for " + "backend %s..." 
% self._backend) + # Acquire IP even before FPG creation. This will save the + # time by not creating FPG in case IP pool is exhausted + claim_free_ip_cmd = ClaimAvailableIPCmd( + self._backend, + self.src_bkend_config, + self._fp_etcd_client, + self._hpeplugin_driver + ) + ip, netmask = claim_free_ip_cmd.execute() + LOG.info("Acquired IP %s for VFS creation" % ip) + undo_cmds.append(claim_free_ip_cmd) + + LOG.info("Creating VFS %s under FPG %s" % + (vfs_name, fpg_name)) + create_vfs_cmd = CreateVfsCmd( + self, cpg, fpg_name, vfs_name, ip, netmask + ) + create_vfs_cmd.execute() + LOG.info("VFS %s created successfully under FPG %s" % + (vfs_name, fpg_name)) + undo_cmds.append(create_vfs_cmd) + + LOG.info("Marking IP %s to be in use by VFS /%s/%s" + % (ip, fpg_name, vfs_name)) + # Now that VFS has been created successfully, move the IP + # from locked-ip-list to ips-in-use list + claim_free_ip_cmd.mark_ip_in_use() + share_args['vfsIPs'] = [(ip, netmask)] + + __create_share_and_quota() + + except (exception.IPAddressPoolExhausted, + exception.VfsCreationFailed, + exception.FpgCreationFailed, + exception.HPEDriverNonExistentCpg) as ex: + msg = "Share creation on new FPG failed. Reason: %s" \ + % six.text_type(ex) + raise exception.ShareCreationFailed(reason=msg) + + except Exception as ex: + msg = "Unknown exception caught. Reason: %s" \ + % six.text_type(ex) + raise exception.ShareCreationFailed(reason=msg) + + except (exception.FpgCapacityInsufficient, + exception.InvalidInput) as ex: + msg = "Share creation failed. Reason: %s" % six.text_type(ex) + raise exception.ShareCreationFailed(reason=msg) + + except Exception as ex: + msg = "Unknown exception occurred while creating share " \ + "on new FPG. 
Reason: %s" % six.text_type(ex) + raise exception.ShareCreationFailed(reason=msg) + + @synchronization.synchronized_fp_share('{share_name}') + def _create_share(self, share_name, share_args): + # Check if share already exists + try: + self._etcd.get_share(share_name) + return + except exception.EtcdMetadataNotFound: + pass + + # Make copy of args as we are going to modify it + fpg_name = share_args.get('fpg') + undo_cmds = [] + + try: + if fpg_name: + self._create_share_on_fpg( + share_args, + self._get_existing_fpg, + self._create_fpg, + undo_cmds + ) + else: + self._create_share_on_fpg( + share_args, + self._get_default_available_fpg, + self._create_default_fpg, + undo_cmds + ) + except exception.PluginException as ex: + LOG.error(ex.msg) + share_args['status'] = 'FAILED' + share_args['detailedStatus'] = ex.msg + self._etcd.save_share(share_args) + self._unexecute(undo_cmds) + + def remove_share(self, share_name, share): + if 'path_info' in share: + msg = "Cannot delete share %s as it is in mounted state" \ + % share_name + LOG.error(msg) + return json.dumps({'Err': msg}) + cmd = cmd_deleteshare.DeleteShareCmd(self, share) + return cmd.execute() + + @staticmethod + def _rm_implementation_details(db_share): + LOG.info("Removing implementation details from share %s..." 
+ % db_share['name']) + db_share_copy = copy.deepcopy(db_share) + db_share_copy.pop("nfsOptions") + if 'quota_id' in db_share_copy: + db_share_copy.pop("quota_id") + db_share_copy.pop("id") + db_share_copy.pop("readonly") + db_share_copy.pop("comment") + if 'path_info' in db_share_copy: + db_share_copy.pop('path_info') + + LOG.info("Implementation details removed: %s" % db_share_copy) + return db_share_copy + + def get_share_details(self, share_name, db_share): + devicename = '' + if db_share['status'] == 'AVAILABLE': + share_path = self._get_share_path(db_share) + else: + share_path = None + + mountdir = '' + path_info = db_share.get('path_info') + if path_info: + mountdir = self.get_mount_dir(share_name) + + db_share_copy = FileManager._rm_implementation_details(db_share) + db_share_copy['sharePath'] = share_path + size_in_gib = "%d GiB" % (db_share_copy['size'] / 1024) + db_share_copy['size'] = size_in_gib + LOG.info("Returning share: %s" % db_share_copy) + # use volinfo as volname could be partial match + resp = {'Name': share_name, + 'Mountpoint': mountdir, + 'Devicename': devicename, + 'Status': db_share_copy} + response = json.dumps({u"Err": '', u"Volume": resp}) + LOG.debug("Get share: \n%s" % str(response)) + return response + + def get_share_info_for_listing(self, share_name, db_share): + path_info = db_share.get('path_info') + if path_info: + mount_dir = self.get_mount_dir(share_name) + else: + mount_dir = '' + + share_info = { + 'Name': share_name, + 'Mountpoint': mount_dir, + } + return share_info + + @staticmethod + def _is_share_not_mounted(share): + return 'node_mount_info' not in share + + def _is_share_mounted_on_this_node(self, node_mount_info): + return self._node_id in node_mount_info + + def _update_mount_id_list(self, share, mount_id): + node_mount_info = share['node_mount_info'] + + # Check if mount_id is unique + if mount_id in node_mount_info[self._node_id]: + LOG.info("Received duplicate mount-id: %s. 
Ignoring" + % mount_id) + return + + LOG.info("Adding new mount-id %s to node_mount_info..." + % mount_id) + node_mount_info[self._node_id].append(mount_id) + LOG.info("Updating etcd with modified node_mount_info: %s..." + % node_mount_info) + self._etcd.save_share(share) + LOG.info("Updated etcd with modified node_mount_info: %s!" + % node_mount_info) + + def get_mount_dir(self, share_name): + if self._host_config.mount_prefix: + mount_prefix = self._host_config.mount_prefix + else: + mount_prefix = None + mnt_prefix = fileutil.mkfile_dir_for_mounting(mount_prefix) + return "%s%s" % (mnt_prefix, share_name) + + def _create_mount_dir(self, mount_dir): + LOG.info('Creating Directory %(mount_dir)s...', + {'mount_dir': mount_dir}) + sh.mkdir('-p', mount_dir) + LOG.info('Directory: %(mount_dir)s successfully created!', + {'mount_dir': mount_dir}) + + def _get_share_path(self, db_share): + fpg = db_share['fpg'] + vfs = db_share['vfs'] + file_store = db_share['name'] + vfs_ip, netmask = db_share['vfsIPs'][0] + share_path = "%s:/%s/%s/%s" % (vfs_ip, + fpg, + vfs, + file_store) + return share_path + + def _internal_mount_share(self, share): + share_name = share['name'] + LOG.info("Performing internal mount for share %s..." 
% share_name) + mount_dir = self.get_mount_dir(share_name) + LOG.info("Mount directory for share is %s " % mount_dir) + share_path = self._get_share_path(share) + LOG.info("Share path is %s " % share_path) + my_ip = netutils.get_my_ipv4() + self._hpeplugin_driver.add_client_ip_for_share(share['id'], + my_ip) + self._create_mount_dir(mount_dir) + LOG.info("Mounting share path %s to %s" % (share_path, mount_dir)) + if utils.is_host_os_rhel(): + sh.mount('-o', 'context="system_u:object_r:nfs_t:s0"', + '-t', 'nfs', share_path, mount_dir) + else: + sh.mount('-t', 'nfs', share_path, mount_dir) + LOG.debug('Device: %(path)s successfully mounted on %(mount)s', + {'path': share_path, 'mount': mount_dir}) + + response = { + u"Name": share_name, + u"Mountpoint": mount_dir, + u"Devicename": share_path + } + return response + + def _internal_unmount_share(self, share): + share_name = share['name'] + mount_dir = self.get_mount_dir(share_name) + LOG.info('Unmounting share %s from mount-dir %s...' + % (share_name, mount_dir)) + sh.umount(mount_dir) + LOG.info('Removing mount dir from node %s: %s...' + % (mount_dir, self._node_id)) + sh.rm('-rf', mount_dir) + + # Remove my_ip from client-ip list this being last + # un-mount of share for this node + my_ip = netutils.get_my_ipv4() + LOG.info("Remove %s from client IP list" % my_ip) + self._hpeplugin_driver.remove_client_ip_for_share( + share['id'], my_ip) + + def mount_share(self, share_name, share, mount_id): + if 'status' in share: + if share['status'] == 'FAILED': + msg = "Share %s is in FAILED state. Please remove it and " \ + "create a new one and then retry mount" % share_name + LOG.error(msg) + return json.dumps({u"Err": msg}) + elif share['status'] == 'CREATING': + msg = "Share %s is in CREATING state. Please wait for it " \ + "to be in AVAILABLE state and then retry mount" \ + % share_name + LOG.error(msg) + return json.dumps({u"Err": msg}) + elif share['status'] == 'AVAILABLE': + msg = "Share %s is in AVAILABLE state. 
Attempting mount..." \ + % share_name + LOG.info(msg) + else: + msg = "ERROR: Share %s is in UNKNOWN state. Aborting " \ + "mount..." % share_name + LOG.error(msg) + return json.dumps({u"Err": msg}) + + fUser = None + fGroup = None + fMode = None + if share['fsOwner']: + fOwner = share['fsOwner'].split(':') + fUser = int(fOwner[0]) + fGroup = int(fOwner[1]) + if share['fsMode']: + try: + fMode = int(share['fsMode']) + except ValueError: + fMode = share['fsMode'] + share_path = self._get_share_path(share) + LOG.info("Share path: %s " % share_path) + + # 'path_info': { + # node_id1: ['mnt_id1', 'mnt_id2',...], + # node_id2: ['mnt_id3', 'mnt_id4',...], + # } + mount_dir = self.get_mount_dir(share_name) + LOG.info("Mount directory for file is %s " % mount_dir) + path_info = share.get('path_info') + + # ACLs need to be set only with the first mount + # For second mount onwards, path_info will be present in + # ETCD which will make acls_already_set set to True thereby + # avoiding redundant backend REST calls for check_user and + # set_ACL + acls_already_set = False + if path_info: + # Setting the flag to True would avoid backend REST calls + # to set_acl and check_user + acls_already_set = True + # Is the share mounted on this node? + mount_ids = path_info.get(self._node_id) + if mount_ids: + # Share is already mounted on this node + if mount_id not in mount_ids: + # Add mount_id information and return + mount_ids.append(mount_id) + # path_info got modified. Save it to ETCD + self._etcd.save_share(share) + response = json.dumps({ + u"Err": '', + u"Name": share_name, + u"Mountpoint": mount_dir, + u"Devicename": share_path + }) + return response + + # Either this is the first mount of this share on this node + # Or it was mounted on a different node and now it's being + # mounted on this node. 
Add host IP to Client IP list, create + # mount directory, apply permissions and mount file share + fUName = None + fGName = None + user_grp_perm = fUser or fGroup or fMode + if user_grp_perm and not acls_already_set: + LOG.info("Inside fUser or fGroup or fMode") + fUName, fGName = self._hpeplugin_driver.usr_check(fUser, + fGroup) + if fUName is None or fGName is None: + msg = ("Either user or group does not exist on 3PAR." + " Please create local users and group with" + " required user id and group id on 3PAR." + " Refer 3PAR cli user guide to create 3PAR" + " local users on 3PAR") + LOG.error(msg) + response = json.dumps({u"Err": msg, u"Name": share_name, + u"Mountpoint": mount_dir, + u"Devicename": share_path}) + return response + + my_ip = netutils.get_my_ipv4() + self._hpeplugin_driver.add_client_ip_for_share(share['id'], + my_ip) + client_ips = share['clientIPs'] + client_ips.append(my_ip) + + if path_info: + path_info[self._node_id] = [mount_id] + else: + # node_mnt_info not present + share['path_info'] = { + self._node_id: [mount_id] + } + + self._create_mount_dir(mount_dir) + LOG.info("Mounting share path %s to %s" % (share_path, mount_dir)) + if utils.is_host_os_rhel(): + sh.mount('-o', 'context="system_u:object_r:nfs_t:s0"', + '-t', 'nfs', share_path, mount_dir) + else: + sh.mount('-t', 'nfs', share_path, mount_dir) + LOG.debug('Device: %(path)s successfully mounted on %(mount)s', + {'path': share_path, 'mount': mount_dir}) + + if user_grp_perm and not acls_already_set: + os.chown(mount_dir, fUser, fGroup) + try: + if fMode is not None: + int(fMode) + sh.chmod(fMode, mount_dir) + except ValueError: + fUserId = share['id'] + try: + self._hpeplugin_driver.set_ACL(fMode, fUserId, fUName, + fGName) + except exception.ShareBackendException as ex: + msg = (_("Exception raised for ACL setting," + " but proceed. User is adviced to correct" + " the passed fsMode to suit its owner and" + " group requirement. 
Delete the share and " + " create new with correct fsMode value." + " Please also refer the logs for same. " + "Exception is %s") % six.text_type(ex)) + LOG.error(msg) + LOG.info("Unmounting the share,permissions are not set.") + sh.umount(mount_dir) + LOG.info("Removing the created directory.") + sh.rm('-rf', mount_dir) + LOG.error(msg) + response = json.dumps({u"Err": msg, u"Name": share_name, + u"Mountpoint": mount_dir, + u"Devicename": share_path}) + return response + self._etcd.save_share(share) + response = json.dumps({u"Err": '', u"Name": share_name, + u"Mountpoint": mount_dir, + u"Devicename": share_path}) + return response + + def unmount_share(self, share_name, share, mount_id): + # Start of volume fencing + LOG.info('Unmounting share: %s' % share) + + # 'path_info': { + # node_id1: ['mnt_id1', 'mnt_id2',...], + # node_id2: ['mnt_id3', 'mnt_id4',...], + # } + path_info = share.get('path_info') + if path_info: + mount_ids = path_info.get(self._node_id) + if mount_ids and mount_id in mount_ids: + LOG.info("Removing mount-id '%s' from mount-id-list..." + % mount_id) + mount_ids.remove(mount_id) + if not mount_ids: + # This is last un-mount being done on this node + del path_info[self._node_id] + mount_dir = self.get_mount_dir(share_name) + LOG.info('Unmounting share %s from mount-dir %s...' + % (share_name, mount_dir)) + sh.umount(mount_dir) + LOG.info('Removing mount dir from node %s: %s...' 
+ % (mount_dir, self._node_id)) + sh.rm('-rf', mount_dir) + + # Remove my_ip from client-ip list this being last + # un-mount of share for this node + my_ip = netutils.get_my_ipv4() + LOG.info("Remove %s from client IP list" % my_ip) + client_ips = share['clientIPs'] + client_ips.remove(my_ip) + self._hpeplugin_driver.remove_client_ip_for_share( + share['id'], my_ip) + + # If no mount remains, delete path_info from share + if not path_info: + del share['path_info'] + + self._etcd.save_share(share) + LOG.info('Unmount completed for share: %s, %s' % + (share_name, mount_id)) + else: + LOG.error("ERROR: Mount-ID %s not found in ETCD for node %s" + % (mount_id, self._node_id)) + else: + LOG.error("ERROR: Meta-data indicates the share %s is not " + "mounted on any node" % share_name) + response = json.dumps({u"Err": ''}) + return response diff --git a/hpedockerplugin/fileutil.py b/hpedockerplugin/fileutil.py index 7e288952..c819b5ab 100644 --- a/hpedockerplugin/fileutil.py +++ b/hpedockerplugin/fileutil.py @@ -17,7 +17,9 @@ from sh import mkdir from sh import mount from sh import umount +from sh import grep import subprocess +import os from sh import rm from oslo_log import log as logging from hpedockerplugin.i18n import _, _LI @@ -70,7 +72,16 @@ def create_filesystem(path): return True -def mkdir_for_mounting(path): +def mkfile_dir_for_mounting(mount_prefix): + if mount_prefix: + global prefix + prefix = mount_prefix + return prefix + else: + return prefix + + +def mkdir_for_mounting(path, mount_prefix): try: data = path.split("/") # TODO: Investigate what triggers OS Brick to return a @@ -79,6 +90,13 @@ def mkdir_for_mounting(path): uuid = data[3] else: uuid = data[4] + + if mount_prefix: + global prefix + prefix = mount_prefix + + LOG.info('MOUNT PREFIX : %s' % prefix) + directory = prefix + uuid mkdir("-p", directory) except Exception as ex: @@ -92,12 +110,20 @@ def mount_dir(src, tgt): try: mount("-t", "ext4", src, tgt) except Exception as ex: - msg = (_('exception 
is : %s'), six.text_type(ex)) - LOG.error(msg) - raise exception.HPEPluginMountException(reason=msg) + msg = _('exception is : %s' % six.text_type(ex)) + if 'already mounted' in msg: + LOG.info('%s is already in mounted on %s' % (src, tgt)) + pass + else: + LOG.error(msg) + raise exception.HPEPluginMountException(reason=msg) return True +def check_if_file_exists(path): + return os.path.exists(path) + + def umount_dir(tgt): # For some reason sh.mountpoint does not work, so # using subprocess instead. @@ -138,3 +164,37 @@ def remove_file(tgt): LOG.error(msg) raise exception.HPEPluginRemoveDirException(reason=msg) return True + + +def check_if_mounted(src, tgt): + try: + # List all mounts with "mount -l". + # Then grep the list for the source and the target of the mount + # using regular expression with the paths. + # _ok_code=[0,1] is used because grep returns an ErrorCode_1 + # if it cannot find any matches on the pattern. + mapper_entry = find_mapper_entry(src) + mountpoint = grep( + grep(mount("-l"), "-E", mapper_entry, _ok_code=[0, 1]), + "-E", tgt, _ok_code=[0, 1] + ) + except Exception as ex: + msg = (_('exception is : %s'), six.text_type(ex)) + LOG.error(msg) + raise exception.HPEPluginCheckMountException(reason=msg) + # If there is no line matching the criteria from above then the + # mount is not present, return False. 
+ if not mountpoint: + return False + else: + return True + + +def find_mapper_entry(src): + path = '/dev/mapper/' + for file in os.listdir(path): + print('real: %s , src %s' % (os.path.realpath(path + file), src)) + if os.path.realpath(path + file) == src: + return path + file + # In worst case return src + return src diff --git a/hpedockerplugin/hpe/array_connection_params.py b/hpedockerplugin/hpe/array_connection_params.py index ab329940..cb817d7a 100644 --- a/hpedockerplugin/hpe/array_connection_params.py +++ b/hpedockerplugin/hpe/array_connection_params.py @@ -15,3 +15,6 @@ def __getattr__(self, key): object.__getattribute__(self, key) except AttributeError: return None + + def is_param_present(self, param): + return param in dir(self) diff --git a/hpedockerplugin/hpe/hpe3par_opts.py b/hpedockerplugin/hpe/hpe3par_opts.py index 62422b8e..fdddb668 100644 --- a/hpedockerplugin/hpe/hpe3par_opts.py +++ b/hpedockerplugin/hpe/hpe3par_opts.py @@ -1,4 +1,5 @@ from oslo_config import cfg +from hpedockerplugin.hpe import vfs_ip_pool as ip_pool hpe3par_opts = [ @@ -17,8 +18,8 @@ secret=True, deprecated_name='hp3par_password'), cfg.ListOpt('hpe3par_cpg', - default=["OpenStack"], - help="List of the CPG(s) to use for volume creation", + default=[], + help="List of the CPG(s) to use for volume/share creation", deprecated_name='hp3par_cpg'), cfg.ListOpt('hpe3par_snapcpg', default=[], @@ -48,6 +49,12 @@ "standard dict config form: replication_device = " "target_device_id:," "key1:value1,key2:value2..."), + cfg.IntOpt('hpe3par_default_fpg_size', + default=16, + help='FPG size in TiB'), + cfg.MultiOpt('hpe3par_server_ip_pool', + item_type=ip_pool.VfsIpPool(), + help='Target server IP pool'), ] san_opts = [ diff --git a/hpedockerplugin/hpe/hpe_3par_common.py b/hpedockerplugin/hpe/hpe_3par_common.py index b7cb4371..faab093d 100644 --- a/hpedockerplugin/hpe/hpe_3par_common.py +++ b/hpedockerplugin/hpe/hpe_3par_common.py @@ -20,7 +20,6 @@ from oslo_utils import importutils from 
oslo_config import cfg from oslo_log import log as logging -from oslo_service import loopingcall from oslo_utils import units from hpedockerplugin import exception @@ -167,6 +166,7 @@ def client_login(self): raise exception.InvalidInput(reason=msg) known_hosts_file = self._host_config.ssh_hosts_key_file + policy = "AutoAddPolicy" if self._host_config.strict_ssh_host_key_policy: policy = "RejectPolicy" @@ -206,8 +206,8 @@ def check_for_setup_error(self): {"common_ver": self.VERSION, "rest_ver": hpe3parclient.get_version_string()}) - self.client_login() try: + self.client_login() cpg_names = self.src_bkend_config.hpe3par_cpg for cpg_name in cpg_names: self.validate_cpg(cpg_name) @@ -311,7 +311,8 @@ def get_qos_detail(self, vvset): msg = _("Failed to get qos from VV set %s - %s.") %\ (vvset, ex) LOG.error(msg) - raise exception.HPEDriverGetQosFromVvSetFailed(ex) + raise exception.HPEDriverGetQosFromVvSetFailed(vvset_name=vvset, + reason=ex) def get_vvset_detail(self, vvset): return self.client.getVolumeSet(vvset) @@ -339,8 +340,8 @@ def manage_existing(self, volume, existing_ref_details, is_snap=False, LOG.info(msg) pass else: - msg = _("Managing volume %s failed because its attached.") %\ - (existing_ref) + msg = "Managing volume %s failed because it is attached." % \ + existing_ref LOG.error(msg) raise exception.HPEDriverManageVolumeAttached(reason=msg) @@ -647,7 +648,8 @@ def _check_license_enabled(self, valid_licenses, license_to_check, 'valid_licenses': valid_licenses}) if valid_licenses: for license in valid_licenses: - if license_to_check in license.get('name'): + if license_to_check in license.get('name') or \ + 'Golden License' in license.get('name'): return True LOG.debug(("'%(capability)s' requires a '%(license)s' " "license which is not installed.") % @@ -826,8 +828,8 @@ def create_volume(self, volume): extras['snapCPG'] = cpg volume['snap_cpg'] = cpg - # Only set the dedup option if the backend supports it. 
- if self.API_VERSION >= DEDUP_API_VERSION: + # Only set the dedup option if the backend supports it. + if self.API_VERSION >= DEDUP_API_VERSION and tdvv: extras['tdvv'] = tdvv capacity = self._capacity_from_size(volume['size']) @@ -846,9 +848,9 @@ def create_volume(self, volume): extras['compression'] = compression else: err = (_("To create compression enabled volume, size of " - "the volume should be atleast 16GB. Fully " - "provisioned volume can not be compressed. " - "Please re enter requested volume size or " + "the volume should be at least 16GB. Fully " + "provisioned volume cannot be compressed. " + "Please re-enter requested volume size or " "provisioning type. ")) # LOG.error(err) raise exception.HPEDriverInvalidSizeForCompressedVolume( @@ -870,6 +872,27 @@ def create_volume(self, volume): message=msg) except hpeexceptions.HTTPBadRequest as ex: # LOG.error("Exception: %s", ex) + msg = "For compressed and deduplicated volumes both " \ + "'compression' and '%s' must be specified as true" + if (msg % 'tdvv') in ex.get_description(): + # Replace tdvv with dedup + msg = "For deduplicated and compressed volume, " \ + "provisioning must be specified as 'dedup' " \ + "and 'compression' must be specified as true" + raise exception.HPEDriverInvalidInput(reason=msg) + msg = "Either tpvv must be true OR for compressed and " \ + "deduplicated volumes both 'compression' and 'tdvv' " \ + "must be specified as true" + if msg in ex.get_description(): + msg = "For thin volume, 'provisioning' must be specified " \ + "as 'thin'. And for deduplicated and compressed " \ + "volume, 'provisioning' must be specified as 'dedup' " \ + "and 'compression' must be specified as true. " \ + "If any of " \ + "these conditions for a given type of volume " \ + "is not met volume creation will fail." 
+ raise exception.HPEDriverInvalidInput(reason=msg) + raise exception.HPEDriverInvalidInput(reason=ex.get_description()) # except exception.InvalidInput as ex: # LOG.error("Exception: %s", ex) @@ -887,7 +910,7 @@ def delete_volume(self, volume, is_snapshot=False): if volume.get('rcg_info'): # this is replicated volume self._do_volume_replication_destroy(volume) - LOG.info("Deletion of replicated volume:%s successfull" + LOG.info("Deletion of replicated volume:%s successful" % volume) return @@ -962,7 +985,7 @@ def _do_volume_replication_destroy(self, volume): # TODO(sonivi): avoid volume deletion incase of failover # avoid volume deletion incase of switchover rcg_info = self.client.getRemoteCopyGroup(rcg_name) - if rcg_info.get('role') != 1: + if rcg_info.get('role') != self.ROLE_PRIMARY: # it's not primary msg = (_("Failed to delete volume: %(vol)s as rcg: %(rcg)s do" " not have valid role") % { @@ -990,7 +1013,8 @@ def _do_volume_replication_destroy(self, volume): LOG.info("vol:%(vol_name)s succesfully removed from RCG: " "%(rcg_name)s.", {'vol_name': vol_name, 'rcg_name': rcg_name}) - except Exception: + except Exception as ex: + LOG.error("%s" % six.iteritems(ex)) pass # Delete volume @@ -1019,10 +1043,10 @@ def _do_volume_replication_destroy(self, volume): # if other volumes are present, then start rcg LOG.info("Other Volumes are present in RCG:%(rcg_info)s", {'rcg_info': rcg_info}) - LOG.info("Starting RCG:%(rcg_name)s.", {'rcg_info': rcg_name}) + LOG.info("Starting RCG:%(rcg_name)s.", {'rcg_name': rcg_name}) self.client.startRemoteCopy(rcg_name) - LOG.info("Successfully started RCG:%(rcg_info)s.", - {'rcg_info': rcg_info}) + LOG.info("Successfully started RCG:%(rcg_name)s.", + {'rcg_name': rcg_name}) def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): if wwns is not None and not isinstance(wwns, list): @@ -1265,9 +1289,9 @@ def create_snapshot(self, snapshot): except hpeexceptions.HTTPForbidden as ex: LOG.error("Exception: %s", ex) raise 
exception.NotAuthorized() - except hpeexceptions.HTTPNotFound as ex: + except Exception as ex: LOG.error("Exception: %s", ex) - raise exception.NotFound() + raise exception.PluginException(ex) def create_cloned_volume(self, dst_volume, src_vref): LOG.info("Create clone of volume\n%s", json.dumps(src_vref, indent=2)) @@ -1333,21 +1357,9 @@ def create_cloned_volume(self, dst_volume, src_vref): self.create_volume(dst_volume) optional = {'priority': 1} - body = self.client.copyVolume(src_3par_vol_name, - dst_3par_vol_name, None, - optional=optional) - task_id = body['taskid'] - - task_status = self._wait_for_task_completion(task_id) - if task_status['status'] is not self.client.TASK_DONE: - dbg = {'status': task_status, 'id': dst_volume['id']} - msg = _('copy volume task failed: create_cloned_volume ' - 'id=%(id)s, status=%(status)s.') % dbg - LOG.error(msg) - raise exception.PluginException(msg) - else: - LOG.debug('Copy volume completed: create_cloned_volume: ' - 'id=%s.', dst_volume['id']) + self.client.copyVolume(src_3par_vol_name, + dst_3par_vol_name, None, + optional=optional) comments = {'volume_id': dst_volume['id'], 'name': dst_volume['name'], @@ -1377,7 +1389,7 @@ def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None, if snap_cpg is not None: optional['snapCPG'] = snap_cpg - if self.API_VERSION >= DEDUP_API_VERSION: + if self.API_VERSION >= DEDUP_API_VERSION and tdvv: optional['tdvv'] = tdvv if (compression is not None and @@ -1387,28 +1399,6 @@ def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None, body = self.client.copyVolume(src_name, dest_name, cpg, optional) return body['taskid'] - def _wait_for_task_completion(self, task_id): - """This waits for a 3PAR background task complete or fail. - - This looks for a task to get out of the 'active' state. 
- """ - # Wait for the physical copy task to complete - def _wait_for_task(task_id): - status = self.client.getTask(task_id) - LOG.debug("3PAR Task id %(id)s status = %(status)s", - {'id': task_id, - 'status': status['status']}) - if status['status'] is not self.client.TASK_ACTIVE: - self._task_status = status - raise loopingcall.LoopingCallDone() - - self._task_status = None - timer = loopingcall.FixedIntervalLoopingCall( - _wait_for_task, task_id) - timer.start(interval=1).wait() - - return self._task_status - def get_snapshots_by_vol(self, vol_id, snap_cpg): bkend_vol_name = utils.get_3par_vol_name(vol_id) LOG.debug("Querying snapshots for %s in %s cpg " @@ -1487,7 +1477,7 @@ def create_rcg(self, **kwargs): src_config = self.src_bkend_config tgt_config = self.tgt_bkend_config bkend_replication_mode = self._get_backend_replication_mode( - src_config.replication_mode) + tgt_config.replication_mode) cpg = tgt_config.hpe3par_cpg if isinstance(cpg, list): @@ -1587,3 +1577,6 @@ def delete_rcg(self, **kwargs): (rcg_name, six.text_type(ex))) LOG.error(msg) raise exception.HPERemoteCopyGroupBackendAPIException(data=msg) + + def is_vol_having_active_task(self, vol_name): + return self.client.isOnlinePhysicalCopy(vol_name) diff --git a/hpedockerplugin/hpe/hpe_3par_fc.py b/hpedockerplugin/hpe/hpe_3par_fc.py index be51daef..a81e88f6 100644 --- a/hpedockerplugin/hpe/hpe_3par_fc.py +++ b/hpedockerplugin/hpe/hpe_3par_fc.py @@ -550,3 +550,17 @@ def get_rcg(self, rcg_name): return common.get_rcg(rcg_name) finally: self._logout(common) + + def is_vol_having_active_task(self, vol_name): + common = self._login() + try: + return common.is_vol_having_active_task(vol_name) + finally: + self._logout(common) + + def get_domain(self, cpg_name): + common = self._login() + try: + return common.get_domain(cpg_name) + finally: + self._logout(common) diff --git a/hpedockerplugin/hpe/hpe_3par_iscsi.py b/hpedockerplugin/hpe/hpe_3par_iscsi.py index 9547ac5f..18b399bf 100644 --- 
a/hpedockerplugin/hpe/hpe_3par_iscsi.py +++ b/hpedockerplugin/hpe/hpe_3par_iscsi.py @@ -98,8 +98,8 @@ def do_setup(self, timeout): self._check_flags(common) common.check_for_setup_error() - common.client_login() try: + common.client_login() self.initialize_iscsi_ports(common) finally: self._logout(common) @@ -768,3 +768,17 @@ def get_rcg(self, rcg_name): return common.get_rcg(rcg_name) finally: self._logout(common) + + def is_vol_having_active_task(self, vol_name): + common = self._login() + try: + return common.is_vol_having_active_task(vol_name) + finally: + self._logout(common) + + def get_domain(self, cpg_name): + common = self._login() + try: + return common.get_domain(cpg_name) + finally: + self._logout(common) diff --git a/hpedockerplugin/hpe/hpe_3par_mediator.py b/hpedockerplugin/hpe/hpe_3par_mediator.py new file mode 100644 index 00000000..53ea1dd5 --- /dev/null +++ b/hpedockerplugin/hpe/hpe_3par_mediator.py @@ -0,0 +1,711 @@ +# Copyright 2015 Hewlett Packard Enterprise Development LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""HPE 3PAR Mediator for OpenStack Manila. +This 'mediator' de-couples the 3PAR focused client from the OpenStack focused +driver. 
+""" +import six + +from oslo_log import log +from oslo_service import loopingcall +from oslo_utils import importutils + +from hpedockerplugin import exception +from hpedockerplugin.i18n import _ + +hpe3parclient = importutils.try_import("hpe3parclient") +if hpe3parclient: + from hpe3parclient import file_client + from hpe3parclient import exceptions as hpeexceptions + +LOG = log.getLogger(__name__) +MIN_CLIENT_VERSION = (4, 0, 0) + +BAD_REQUEST = '400' +OTHER_FAILURE_REASON = 29 +NON_EXISTENT_CPG = 15 +INV_INPUT_ILLEGAL_CHAR = 69 +TASK_STATUS_NORMAL = 1 + +# Overriding these class variable so that minimum supported version is 3.3.1 +file_client.HPE3ParFilePersonaClient.HPE3PAR_WS_MIN_BUILD_VERSION = 30301460 +file_client.HPE3ParFilePersonaClient.HPE3PAR_WS_MIN_BUILD_VERSION_DESC = \ + '3.3.1 (MU3)' + + +class HPE3ParMediator(object): + """3PAR client-facing code for the 3PAR driver. + Version history: + 1.0.0 - Begin Liberty development (post-Kilo) + 1.0.1 - Report thin/dedup/hp_flash_cache capabilities + 1.0.2 - Add share server/share network support + 1.0.3 - Use hp3par prefix for share types and capabilities + 2.0.0 - Rebranded HP to HPE + 2.0.1 - Add access_level (e.g. 
read-only support) + 2.0.2 - Add extend/shrink + 2.0.3 - Fix SMB read-only access (added in 2.0.1) + 2.0.4 - Remove file tree on delete when using nested shares #1538800 + 2.0.5 - Reduce the fsquota by share size + when a share is deleted #1582931 + 2.0.6 - Read-write share from snapshot (using driver mount and copy) + 2.0.7 - Add update_access support + 2.0.8 - Multi pools support per backend + 2.0.9 - Fix get_vfs() to correctly validate conf IP addresses at + boot up #1621016 + """ + + VERSION = "2.0.9" + + def __init__(self, host_config, config): + self._host_config = host_config + self._config = config + self._client = None + self.client_version = None + + @staticmethod + def no_client(): + return hpe3parclient is None + + def _create_client(self): + return file_client.HPE3ParFilePersonaClient( + self._config.hpe3par_api_url) + + def do_setup(self, timeout=30): + + if self.no_client(): + msg = _('You must install hpe3parclient before using the 3PAR ' + 'driver. Run "pip install --upgrade python-3parclient" ' + 'to upgrade the hpe3parclient.') + LOG.error(msg) + raise exception.HPE3ParInvalidClient(message=msg) + + self.client_version = hpe3parclient.version_tuple + if self.client_version < MIN_CLIENT_VERSION: + msg = (_('Invalid hpe3parclient version found (%(found)s). ' + 'Version %(minimum)s or greater required. 
Run "pip' + ' install --upgrade python-3parclient" to upgrade' + ' the hpe3parclient.') % + {'found': '.'.join(map(six.text_type, self.client_version)), + 'minimum': '.'.join(map(six.text_type, + MIN_CLIENT_VERSION))}) + LOG.error(msg) + raise exception.HPE3ParInvalidClient(message=msg) + + try: + self._client = self._create_client() + except Exception as e: + msg = (_('Failed to connect to HPE 3PAR File Persona Client: %s') % + six.text_type(e)) + LOG.exception(msg) + raise exception.ShareBackendException(message=msg) + + try: + ssh_kwargs = {} + if self._config.san_ssh_port: + ssh_kwargs['port'] = self._config.san_ssh_port + if self._config.ssh_conn_timeout: + ssh_kwargs['conn_timeout'] = self._config.ssh_conn_timeout + if self._config.san_private_key: + ssh_kwargs['privatekey'] = \ + self._config.san_private_key + + self._client.setSSHOptions( + self._config.san_ip, + self._config.san_login, + self._config.san_password, + **ssh_kwargs + ) + + except Exception as e: + msg = (_('Failed to set SSH options for HPE 3PAR File Persona ' + 'Client: %s') % six.text_type(e)) + LOG.exception(msg) + raise exception.ShareBackendException(message=msg) + + LOG.info("HPE3ParMediator %(version)s, " + "hpe3parclient %(client_version)s", + {"version": self.VERSION, + "client_version": hpe3parclient.get_version_string()}) + + try: + wsapi_version = self._client.getWsApiVersion()['build'] + LOG.info("3PAR WSAPI %s", wsapi_version) + except Exception as e: + msg = (_('Failed to get 3PAR WSAPI version: %s') % + six.text_type(e)) + LOG.exception(msg) + raise exception.ShareBackendException(message=msg) + + if self._config.hpe3par_debug: + self._client.debug_rest(True) # Includes SSH debug (setSSH above) + + def _wsapi_login(self): + try: + self._client.login(self._config.hpe3par_username, + self._config.hpe3par_password) + except Exception as e: + msg = (_("Failed to Login to 3PAR (%(url)s) as %(user)s " + "because: %(err)s") % + {'url': self._config.hpe3par_api_url, + 'user': 
self._config.hpe3par_username, + 'err': six.text_type(e)}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def _wsapi_logout(self): + try: + self._client.http.unauthenticate() + except Exception as e: + msg = ("Failed to Logout from 3PAR (%(url)s) because %(err)s") + LOG.warning(msg, {'url': self._config.hpe3par_api_url, + 'err': six.text_type(e)}) + # don't raise exception on logout() + + def get_fpgs(self, filter): + try: + self._wsapi_login() + uri = '/fpgs?query="name EQ %s"' % filter + resp, body = self._client.http.get(uri) + return body['members'][0] + finally: + self._wsapi_logout() + + def get_fpg(self, fpg_name): + try: + self._wsapi_login() + uri = '/fpgs?query="name EQ %s"' % fpg_name + resp, body = self._client.http.get(uri) + if not body['members']: + LOG.info("FPG %s not found" % fpg_name) + raise exception.FpgNotFound(fpg=fpg_name) + return body['members'][0] + finally: + self._wsapi_logout() + + def get_vfs(self, fpg_name): + try: + self._wsapi_login() + uri = '/virtualfileservers?query="fpg EQ %s"' % fpg_name + resp, body = self._client.http.get(uri) + if not body['members']: + msg = "VFS for FPG %s not found" % fpg_name + LOG.info(msg) + raise exception.ShareBackendException(msg=msg) + return body['members'][0] + finally: + self._wsapi_logout() + + def get_all_vfs(self): + try: + self._wsapi_login() + uri = '/virtualfileservers' + resp, body = self._client.http.get(uri) + return body['members'] + finally: + self._wsapi_logout() + + @staticmethod + def _get_nfs_options(proto_opts, readonly): + """Validate the NFS extra_specs and return the options to use.""" + + nfs_options = proto_opts + if nfs_options: + options = nfs_options.split(',') + else: + options = [] + + # rw, ro, and (no)root_squash (in)secure options are not allowed in + # extra_specs because they will be forcibly set below. + # no_subtree_check and fsid are not allowed per 3PAR support. 
+ # Other strings will be allowed to be sent to the 3PAR which will do + # further validation. + options_not_allowed = ['ro', 'rw', + 'no_root_squash', 'root_squash', + 'secure', 'insecure', + 'no_subtree_check', 'fsid'] + + invalid_options = [ + option for option in options if option in options_not_allowed + ] + + if invalid_options: + raise exception.InvalidInput(_('Invalid hp3par:nfs_options or ' + 'hpe3par:nfs_options in ' + 'extra-specs. The following ' + 'options are not allowed: %s') % + invalid_options) + + options.append('ro' if readonly else 'rw') + options.append('no_root_squash') + # options.append('insecure') + options.append('secure') + + return ','.join(options) + + def delete_file_store(self, fpg_name, fstore_name): + try: + self._wsapi_login() + query = '/filestores?query="name EQ %s AND fpg EQ %s"' %\ + (fstore_name, fpg_name) + body, fstore = self._client.http.get(query) + if body['status'] == '200' and fstore['total'] == 1: + fstore_id = fstore['members'][0]['id'] + del_uri = '/filestores/%s' % fstore_id + self._client.http.delete(del_uri) + except Exception: + msg = (_('ERROR: File store deletion failed: [fstore: %s,' + 'fpg:%s') % (fstore_name, fpg_name)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def delete_fpg(self, fpg_name): + try: + self._wsapi_login() + query = '/fpgs?query="name EQ %s"' % fpg_name + resp, body = self._client.http.get(query) + if resp['status'] == '200' and body['total'] == 1: + fpg_id = body['members'][0]['id'] + del_uri = '/fpgs/%s' % fpg_id + resp, body = self._client.http.delete(del_uri) + if resp['status'] == '202': + task_id = body['taskId'] + self._wait_for_task_completion(task_id, 10) + except Exception: + msg = (_('ERROR: FPG deletion failed: [fpg: %s,') % fpg_name) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def update_capacity_quotas(self, fstore, size, fpg, vfs): + + def 
_sync_update_capacity_quotas(fstore, new_size, fpg, vfs): + """Update 3PAR quotas and return setfsquota output.""" + + hcapacity = new_size + scapacity = hcapacity + uri = '/filepersonaquotas/' + req_body = { + 'name': fstore, + 'type': 3, + 'vfs': vfs, + 'fpg': fpg, + 'softBlockMiB': scapacity, + 'hardBlockMiB': hcapacity + } + return self._client.http.post(uri, body=req_body) + + try: + self._wsapi_login() + resp, body = _sync_update_capacity_quotas( + fstore, size, fpg, vfs) + if resp['status'] != '201': + msg = (_('Failed to update capacity quota ' + '%(size)s on %(fstore)s') % + {'size': size, + 'fstore': fstore}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + href = body['links'][0]['href'] + uri, quota_id = href.split('filepersonaquotas/') + + LOG.debug("Quota successfully set: resp=%s, body=%s" + % (resp, body)) + return quota_id + except Exception as e: + msg = (_('Failed to update capacity quota ' + '%(size)s on %(fstore)s with exception: %(e)s') % + {'size': size, + 'fstore': fstore, + 'e': six.text_type(e)}) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def remove_quota(self, quota_id): + uri = '/filepersonaquotas/%s' % quota_id + try: + self._wsapi_login() + self._client.http.delete(uri) + except Exception as ex: + msg = "mediator:remove_quota - failed to remove quota %s" \ + "at the backend. Exception: %s" % \ + (quota_id, six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def get_file_stores_for_fpg(self, fpg_name): + uri = '/filestores?query="fpg EQ %s"' % fpg_name + try: + self._wsapi_login() + resp, body = self._client.http.get(uri) + return body + except Exception as ex: + msg = "mediator:get_file_shares - failed to get file stores " \ + "for FPG %s from the backend. 
Exception: %s" % \ + (fpg_name, six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def shares_present_on_fpg(self, fpg_name): + fstores = self.get_file_stores_for_fpg(fpg_name) + for fstore in fstores['members']: + if fstore['name'] != '.admin': + return True + return False + + def get_quotas_for_fpg(self, fpg_name): + uri = '/filepersonaquotas?query="fpg EQ %s"' % fpg_name + try: + self._wsapi_login() + resp, body = self._client.http.get(uri) + return body + except Exception as ex: + msg = "mediator:get_quota - failed to get quotas for FPG %s" \ + "from the backend. Exception: %s" % \ + (fpg_name, six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def _create_share(self, share_details): + fpg_name = share_details['fpg'] + vfs_name = share_details['vfs'] + share_name = share_details['name'] + proto_opts = share_details['nfsOptions'] + readonly = share_details['readonly'] + + args = { + 'name': share_name, + 'type': 1, + 'vfs': vfs_name, + 'fpg': fpg_name, + 'shareDirectory': None, + 'fstore': None, + 'nfsOptions': self._get_nfs_options(proto_opts, readonly), + 'nfsClientlist': ['127.0.0.1'], + 'comment': 'Docker created share' + } + + try: + uri = '/fileshares/' + resp, body = self._client.http.post(uri, body=args) + if resp['status'] != '201': + msg = (_('Failed to create share %(resp)s, %(body)s') % + {'resp': resp, 'body': body}) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + + href = body['links'][0]['href'] + uri, share_id = href.split('fileshares/') + LOG.debug("Share created successfully: %s" % body) + return share_id + except Exception as e: + msg = (_('Failed to create share %(share_name)s: %(e)s') % + {'share_name': share_name, 'e': six.text_type(e)}) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + + def create_share(self, share_details): + try: + 
self._wsapi_login() + return self._create_share(share_details) + finally: + self._wsapi_logout() + + def delete_share(self, share_id): + LOG.info("Mediator:delete_share %s: Entering..." % share_id) + uri = '/fileshares/%s' % share_id + try: + self._wsapi_login() + self._client.http.delete(uri) + except hpeexceptions.HTTPNotFound: + LOG.warning("Share %s not found on backend" % share_id) + pass + except Exception as ex: + msg = "Failed to remove share %s at the backend. Reason: %s" \ + % (share_id, six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def _wait_for_task_completion(self, task_id, interval=1): + """This waits for a 3PAR background task complete or fail. + This looks for a task to get out of the 'active' state. + """ + + # Wait for the physical copy task to complete + def _wait_for_task(task_id, task_status): + status = self._client.getTask(task_id) + LOG.debug("3PAR Task id %(id)s status = %(status)s", + {'id': task_id, + 'status': status['status']}) + if status['status'] is not self._client.TASK_ACTIVE: + task_status.append(status) + raise loopingcall.LoopingCallDone() + + task_status = [] + + timer = loopingcall.FixedIntervalLoopingCall( + _wait_for_task, task_id, task_status) + timer.start(interval=interval).wait() + + if task_status[0]['status'] is not self._client.TASK_DONE: + msg = "ERROR: Task with id %d has failed with status %s" %\ + (task_id, task_status) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + + def create_fpg(self, cpg, fpg_name, size=16): + try: + self._wsapi_login() + uri = '/fpgs/' + args = { + 'name': fpg_name, + 'cpg': cpg, + 'sizeTiB': size, + 'comment': 'Docker created FPG' + } + resp, body = self._client.http.post(uri, body=args) + + LOG.info("Create FPG Response: %s" % six.text_type(resp)) + LOG.info("Create FPG Response Body: %s" % six.text_type(body)) + + task_id = body.get('taskId') + if task_id: + 
self._wait_for_task_completion(task_id, interval=10) + except hpeexceptions.HTTPBadRequest as ex: + error_code = ex.get_code() + LOG.error("Exception: %s" % six.text_type(ex)) + if error_code == OTHER_FAILURE_REASON: + LOG.error(six.text_type(ex)) + msg = ex.get_description() + if 'already exists' in msg or \ + msg.startswith('A createfpg task is already running'): + raise exception.FpgAlreadyExists(reason=msg) + raise exception.ShareBackendException(msg=ex.get_description()) + except hpeexceptions.HTTPNotFound as ex: + error_code = ex.get_code() + LOG.error("Exception: %s" % six.text_type(ex)) + if error_code == NON_EXISTENT_CPG: + msg = "Failed to create FPG %s on the backend. Reason: " \ + "CPG %s doesn't exist on array" % (fpg_name, cpg) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + raise exception.ShareBackendException(msg=ex.get_description()) + except exception.ShareBackendException as ex: + msg = 'Create FPG task failed: cpg=%s,fpg=%s, ex=%s'\ + % (cpg, fpg_name, six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + except Exception as ex: + msg = (_('Failed to create FPG %s of size %s using CPG %s: ' + 'Exception: %s') % (fpg_name, size, cpg, ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def create_vfs(self, vfs_name, ip, subnet, cpg=None, fpg=None, + size=16): + uri = '/virtualfileservers/' + ip_info = { + 'IPAddr': ip, + 'netmask': subnet + } + args = { + 'name': vfs_name, + 'IPInfo': ip_info, + 'cpg': cpg, + 'fpg': fpg, + 'comment': 'Docker created VFS' + } + try: + self._wsapi_login() + resp, body = self._client.http.post(uri, body=args) + if resp['status'] != '202': + msg = 'Create VFS task failed: vfs=%s, cpg=%s,fpg=%s' \ + % (vfs_name, cpg, fpg) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + + task_id = body['taskId'] + self._wait_for_task_completion(task_id, interval=3) + LOG.info("Created VFS '%s' 
successfully" % vfs_name) + except exception.ShareBackendException as ex: + msg = 'Create VFS task failed: vfs=%s, cpg=%s,fpg=%s, ex=%s'\ + % (vfs_name, cpg, fpg, six.text_type(ex)) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + + except Exception: + msg = (_('ERROR: VFS creation failed: [vfs: %s, ip:%s, subnet:%s,' + 'cpg:%s, fpg:%s, size=%s') % (vfs_name, ip, subnet, cpg, + fpg, size)) + LOG.exception(msg) + raise exception.ShareBackendException(msg=msg) + else: + self._check_vfs_status(task_id, fpg) + finally: + self._wsapi_logout() + + def _check_vfs_status(self, task_id, fpg): + LOG.info("Checking status of VFS under FPG %s..." % fpg) + vfs = self.get_vfs(fpg) + overall_state = vfs['overallState'] + + if overall_state != TASK_STATUS_NORMAL: + LOG.info("Overall state of VFS is not normal") + task = self._client.getTask(task_id) + detailed_status = task['detailedStatus'] + lines = detailed_status.split('\n') + error_line = '' + for line in lines: + idx = line.find('Error') + if idx != -1: + error_line += line[idx:] + '\n' + if error_line: + raise exception.ShareBackendException(msg=error_line) + else: + raise exception.ShareBackendException(msg=detailed_status) + + def set_ACL(self, fMode, fUserId, fUName, fGName): + # fsMode = "A:fdps:rwaAxdD,A:fFdps:rwaxdnNcCoy,A:fdgps:DtnNcy" + ACLList = [] + per_type = {"A": 1, "D": 2, "U": 3, "L": 4} + fsMode_list = fMode.split(",") + principal_list = ['OWNER@', 'GROUP@', 'EVERYONE@'] + for index, value in enumerate(fsMode_list): + acl_values = value.split(":") + acl_type = per_type.get(acl_values[0]) + acl_flags = acl_values[1] + acl_principal = "" + if index == 0: + acl_principal = principal_list[index] + if index == 1: + acl_principal = principal_list[index] + if index == 2: + acl_principal = principal_list[index] + acl_permission = acl_values[2] + acl_object = {} + acl_object['aclType'] = acl_type + acl_object['aclFlags'] = acl_flags + acl_object['aclPrincipal'] = acl_principal + 
acl_object['aclPermissions'] = acl_permission + ACLList.append(acl_object) + args = { + 'owner': fUName, + 'group': fGName, + 'ACLList': ACLList + } + LOG.info("ACL args being passed is %s ", args) + try: + self._wsapi_login() + uri = '/fileshares/' + fUserId + '/dirperms' + + self._client.http.put(uri, body=args) + + LOG.debug("Share permissions changed successfully") + + except hpeexceptions.HTTPBadRequest as ex: + msg = (_("File share permission change failed. Exception %s : ") + % six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + finally: + self._wsapi_logout() + + def _check_usr_grp_existence(self, fUserOwner, res_cmd): + fuserowner = str(fUserOwner) + uname_index = 0 + uid_index = 1 + user_name = None + first_line = res_cmd[1] + first_line_list = first_line.split(',') + for index, value in enumerate(first_line_list): + if value == 'Username': + uname_index = index + if value == 'UID': + uid_index = index + res_len = len(res_cmd) + end_index = res_len - 2 + for line in res_cmd[2:end_index]: + line_list = line.split(',') + if fuserowner == line_list[uid_index]: + user_name = line_list[uname_index] + return user_name + if user_name is None: + return None + + def usr_check(self, fUser, fGroup): + LOG.info("I am inside usr_check") + cmd1 = ['showfsuser'] + cmd2 = ['showfsgroup'] + try: + LOG.info("Executing first command: %s..." % cmd1) + cmd1.append('\r') + res_cmd1 = self._client._run(cmd1) + LOG.info("Resp: %s" % res_cmd1) + f_user_name = self._check_usr_grp_existence(fUser, res_cmd1) + LOG.info("Executing second command: %s..." 
% cmd2) + cmd2.append('\r') + res_cmd2 = self._client._run(cmd2) + LOG.info("Resp: %s" % res_cmd2) + f_group_name = self._check_usr_grp_existence(fGroup, res_cmd2) + return f_user_name, f_group_name + except hpeexceptions.SSHException as ex: + msg = (_('Failed to get the corresponding user and group name ' + 'reason is %s:') % six.text_type(ex)) + LOG.error(msg) + raise exception.ShareBackendException(msg=msg) + + def add_client_ip_for_share(self, share_id, client_ip): + uri = '/fileshares/%s' % share_id + body = { + 'nfsClientlistOperation': 1, + 'nfsClientlist': [client_ip] + } + try: + self._wsapi_login() + self._client.http.put(uri, body=body) + except hpeexceptions.HTTPBadRequest as ex: + msg = (_("It is first mount request but ip is already" + " added to the share. Exception %s : ") + % six.text_type(ex)) + LOG.info(msg) + finally: + self._wsapi_logout() + + def remove_client_ip_for_share(self, share_id, client_ip): + uri = '/fileshares/%s' % share_id + body = { + 'nfsClientlistOperation': 2, + 'nfsClientlist': [client_ip] + } + try: + self._wsapi_login() + self._client.http.put(uri, body=body) + finally: + self._wsapi_logout() diff --git a/hpedockerplugin/hpe/hpe_lefthand_iscsi.py b/hpedockerplugin/hpe/hpe_lefthand_iscsi.py deleted file mode 100644 index 8ac606a1..00000000 --- a/hpedockerplugin/hpe/hpe_lefthand_iscsi.py +++ /dev/null @@ -1,361 +0,0 @@ -# (c) Copyright [2016] Hewlett Packard Enterprise Development LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""HPE LeftHand SAN ISCSI REST Proxy. - -Volume driver for HPE LeftHand Storage array. -This driver requires 11.5 or greater firmware on the LeftHand array, using -the 2.0 or greater version of the hpelefthandclient. - -You will need to install the python hpelefthandclient module. -sudo pip install python-lefthandclient - -Set the following in the hpe.conf file to enable the -LeftHand iSCSI REST Driver along with the required flags: - -hpedockerplugin_driver = hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver - -It also requires the setting of hpelefthand_api_url, hpelefthand_username, -hpelefthand_password for credentials to talk to the REST service on the -LeftHand array. - -""" - -from oslo_config import cfg -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import units - -from hpedockerplugin import exception -from hpedockerplugin.i18n import _, _LE, _LI, _LW - -from hpedockerplugin.hpe import san_driver -from hpedockerplugin.hpe import utils as volume_utils - -LOG = logging.getLogger(__name__) - -hpelefthandclient = importutils.try_import("hpelefthandclient") -if hpelefthandclient: - from hpelefthandclient import client as hpe_lh_client - from hpelefthandclient import exceptions as hpeexceptions - -hpelefthand_opts = [ - cfg.StrOpt('hpelefthand_api_url', - default=None, - help="HPE LeftHand WSAPI Server Url like " - "https://:8081/lhos", - deprecated_name='hplefthand_api_url'), - cfg.StrOpt('hpelefthand_username', - default=None, - help="HPE LeftHand Super user username", - deprecated_name='hplefthand_username'), - cfg.StrOpt('hpelefthand_password', - default=None, - help="HPE LeftHand Super user password", - secret=True, - deprecated_name='hplefthand_password'), - cfg.StrOpt('hpelefthand_clustername', - default=None, - help="HPE LeftHand cluster name", - deprecated_name='hplefthand_clustername'), - 
cfg.BoolOpt('hpelefthand_iscsi_chap_enabled', - default=False, - help='Configure CHAP authentication for iSCSI connections ' - '(Default: Disabled)', - deprecated_name='hplefthand_iscsi_chap_enabled'), - cfg.BoolOpt('hpelefthand_debug', - default=False, - help="Enable HTTP debugging to LeftHand", - deprecated_name='hplefthand_debug'), - cfg.BoolOpt('suppress_requests_ssl_warnings', - default=False, - help='Suppress requests library SSL certificate warnings.'), - -] - -CONF = cfg.CONF -CONF.register_opts(hpelefthand_opts) - -MIN_API_VERSION = "1.1" -MIN_CLIENT_VERSION = '2.0.0' - - -class HPELeftHandISCSIDriver(object): - """Executes REST commands relating to HPE/LeftHand SAN ISCSI volumes. - - Version history: - - .. code-block:: none - - 0.0.1 - Initial version of the LeftHand iSCSI driver created. - 0.0.2 - Added support for CHAP. - 0.0.3 - Added the ability to choose volume provisionings. - - """ - - VERSION = "0.0.3" - - valid_prov_values = ['thin', 'full', 'dedup'] - - def __init__(self, hpelefthandconfig): - - self.configuration = hpelefthandconfig - self.configuration.append_config_values(hpelefthand_opts) - - # TODO: Need to move the SAN opts values out, but where?!? 
- self.configuration.append_config_values(san_driver.san_opts) - self.configuration.append_config_values(san_driver.volume_opts) - - # blank is the only invalid character for cluster names - # so we need to use it as a separator - self.DRIVER_LOCATION = self.__class__.__name__ + ' %(cluster)s %(vip)s' - - def _login(self): - client = self._create_client() - try: - if self.configuration.hpelefthand_debug: - client.debug_rest(True) - - client.login( - self.configuration.hpelefthand_username, - self.configuration.hpelefthand_password) - - cluster_info = client.getClusterByName( - self.configuration.hpelefthand_clustername) - self.cluster_id = cluster_info['id'] - if len(cluster_info['virtualIPAddresses']) > 0: - virtual_ips = cluster_info['virtualIPAddresses'] - self.cluster_vip = virtual_ips[0]['ipV4Address'] - else: - # No VIP configured, so just use first storage node IP - LOG.warning(_LW('VIP is not configured using node IP ')) - ipAddrs = cluster_info['storageModuleIPAddresses'] - self.cluster_vip = ipAddrs[0] - - return client - except hpeexceptions.HTTPNotFound: - raise exception.ConnectionError( - _('LeftHand cluster not found')) - except Exception as ex: - raise exception.ConnectionError(ex) - - def _logout(self, client): - client.logout() - - def _create_client(self): - return hpe_lh_client.HPELeftHandClient( - self.configuration.hpelefthand_api_url, - suppress_ssl_warnings=CONF.suppress_requests_ssl_warnings) - - def do_setup(self): - """Set up LeftHand client.""" - if hpelefthandclient.version < MIN_CLIENT_VERSION: - ex_msg = (_("Invalid hpelefthandclient version found (" - "%(found)s). Version %(minimum)s or greater " - "required. 
Run 'pip install --upgrade " - "python-lefthandclient' to upgrade the " - "hpelefthandclient.") - % {'found': hpelefthandclient.version, - 'minimum': MIN_CLIENT_VERSION}) - LOG.error(ex_msg) - raise exception.InvalidInput(reason=ex_msg) - - def check_for_setup_error(self): - """Checks for incorrect LeftHand API being used on backend.""" - client = self._login() - try: - self.api_version = client.getApiVersion() - - LOG.info(_LI("HPELeftHand API version %s"), self.api_version) - - if self.api_version < MIN_API_VERSION: - LOG.warning(_LW("HPELeftHand API is version %(current)s. " - "A minimum version of %(min)s is needed for " - "manage/unmanage support."), - {'current': self.api_version, - 'min': MIN_API_VERSION}) - finally: - self._logout(client) - - def get_version_string(self): - return (_('REST %(proxy_ver)s hpelefthandclient %(rest_ver)s') % { - 'proxy_ver': self.VERSION, - 'rest_ver': hpelefthandclient.get_version_string()}) - - def create_volume(self, volume): - """Creates a volume.""" - # check for valid provisioning type - prov_value = volume['provisioning'] - if prov_value not in self.valid_prov_values: - err = (_("Must specify a valid provisioning type %(valid)s, " - "value '%(prov)s' is invalid.") % - {'valid': self.valid_prov_values, - 'prov': prov_value}) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - thin_prov = True - - if prov_value == "full": - thin_prov = False - elif prov_value == "dedup": - err = (_("Dedup is not supported in the StoreVirtual driver.")) - LOG.error(err) - raise exception.InvalidInput(reason=err) - - client = self._login() - try: - optional = {'isThinProvisioned': thin_prov, - 'dataProtectionLevel': 0} - - clusterName = self.configuration.hpelefthand_clustername - optional['clusterName'] = clusterName - - volume_info = client.createVolume( - volume['name'], self.cluster_id, - volume['size'] * units.Gi, - optional) - - model_update = self._update_provider(volume_info) - volume['provider_location'] = 
model_update['provider_location'] - volume['provider_auth'] = '' - except Exception as ex: - raise exception.VolumeBackendAPIException(data=ex) - finally: - self._logout(client) - - def delete_volume(self, volume): - """Deletes a volume.""" - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - client.deleteVolume(volume_info['id']) - except hpeexceptions.HTTPNotFound: - LOG.error(_LE("Volume did not exist. It will not be deleted")) - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - def initialize_connection(self, volume, connector): - """Assigns the volume to a server. - - Assign any created volume to a compute node/host so that it can be - used from that host. HPE VSA requires a volume to be assigned - to a server. - """ - client = self._login() - try: - server_info = self._create_server(connector, client) - volume_info = client.getVolumeByName(volume['name']) - - access_already_enabled = False - if volume_info['iscsiSessions'] is not None: - # Extract the server id for each session to check if the - # new server already has access permissions enabled. 
- for session in volume_info['iscsiSessions']: - server_id = int(session['server']['uri'].split('/')[3]) - if server_id == server_info['id']: - access_already_enabled = True - break - - if not access_already_enabled: - client.addServerAccess( - volume_info['id'], - server_info['id']) - - iscsi_properties = san_driver._get_iscsi_properties( - volume, - self.configuration.iscsi_ip_address) - - if ('chapAuthenticationRequired' in server_info and - server_info['chapAuthenticationRequired']): - iscsi_properties['auth_method'] = 'CHAP' - iscsi_properties['auth_username'] = connector['initiator'] - iscsi_properties['auth_password'] = ( - server_info['chapTargetSecret']) - - return {'driver_volume_type': 'iscsi', 'data': iscsi_properties} - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - def terminate_connection(self, volume, connector, **kwargs): - """Unassign the volume from the host.""" - client = self._login() - try: - volume_info = client.getVolumeByName(volume['name']) - server_info = client.getServerByName(connector['host']) - volume_list = client.findServerVolumes(server_info['name']) - - removeServer = True - for entry in volume_list: - if entry['id'] != volume_info['id']: - removeServer = False - break - - client.removeServerAccess( - volume_info['id'], - server_info['id']) - - if removeServer: - client.deleteServer(server_info['id']) - except Exception as ex: - raise exception.VolumeBackendAPIException(ex) - finally: - self._logout(client) - - def _create_server(self, connector, client): - server_info = None - chap_enabled = self.configuration.hpelefthand_iscsi_chap_enabled - try: - server_info = client.getServerByName(connector['host']) - chap_secret = server_info['chapTargetSecret'] - if not chap_enabled and chap_secret: - LOG.warning(_LW('CHAP secret exists for host %s but CHAP is ' - 'disabled'), connector['host']) - if chap_enabled and chap_secret is None: - LOG.warning(_LW('CHAP is enabled, but 
server secret not ' - 'configured on server %s'), connector['host']) - return server_info - except hpeexceptions.HTTPNotFound: - # server does not exist, so create one - pass - - optional = None - if chap_enabled: - chap_secret = volume_utils.generate_password() - optional = {'chapName': connector['initiator'], - 'chapTargetSecret': chap_secret, - 'chapAuthenticationRequired': True - } - - server_info = client.createServer(connector['host'], - connector['initiator'], - optional) - return server_info - - def _update_provider(self, volume_info, cluster_vip=None): - if not cluster_vip: - cluster_vip = self.cluster_vip - # TODO(justinsb): Is this always 1? Does it matter? - cluster_interface = '1' - iscsi_portal = cluster_vip + ":3260," + cluster_interface - - return {'provider_location': ( - "%s %s %s" % (iscsi_portal, volume_info['iscsiIqn'], 0))} - - def create_export(self, volume, connector): - pass diff --git a/hpedockerplugin/hpe/san_driver.py b/hpedockerplugin/hpe/san_driver.py deleted file mode 100644 index e9aa49ca..00000000 --- a/hpedockerplugin/hpe/san_driver.py +++ /dev/null @@ -1,195 +0,0 @@ -# (c) Copyright [2016] Hewlett Packard Enterprise Development LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from oslo_config import cfg -from hpedockerplugin.i18n import _ - -from sh import iscsiadm - -volume_opts = [ - cfg.StrOpt('iscsi_ip_address', - default='my_ip', - help='The IP address that the iSCSI daemon is listening on'), - cfg.PortOpt('iscsi_port', - default=3260, - help='The port that the iSCSI daemon is listening on'), - cfg.BoolOpt('use_chap_auth', - default=False, - help='Option to enable/disable CHAP authentication for ' - 'targets.'), - cfg.StrOpt('chap_username', - default='', - help='CHAP user name.'), - cfg.StrOpt('chap_password', - default='', - help='Password for specified CHAP account name.', - secret=True), -] - -# TODO: How do we include san module and register san_opts -# We want to limit the amount of extra stuff we take from -# OpenStack, so just define san_opts here. -san_opts = [ - cfg.StrOpt('san_ip', - default='', - help='IP address of SAN controller'), - cfg.StrOpt('san_login', - default='admin', - help='Username for SAN controller'), - cfg.StrOpt('san_password', - default='', - help='Password for SAN controller', - secret=True), - cfg.StrOpt('san_private_key', - default='', - help='Filename of private key to use for SSH authentication'), - cfg.PortOpt('san_ssh_port', - default=22, - help='SSH port to use with SAN'), - cfg.IntOpt('ssh_conn_timeout', - default=30, - help="SSH connection timeout in seconds"), -] - - -CONF = cfg.CONF -CONF.register_opts(volume_opts) -CONF.register_opts(san_opts) - - -def _do_iscsi_discovery(volume, targetip): - # TODO(justinsb): Deprecate discovery and use stored info - # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
- - volume_name = volume['name'] - - try: - out = iscsiadm('iscsiadm', '-m', 'discovery', - '-t', 'sendtargets', '-p', targetip) - - except Exception as e: - print("Error from iscsiadm -m discovery: %s" % (targetip)) - print('exception is : %s' % (e)) - raise - - for target in out.splitlines(): - if (targetip in target and - volume_name in target): - return target - return None - - -""" -Leveraged _get_iscsi_properties from Cinder driver -Removed encryption and CHAP support for now. -""" - - -def _get_iscsi_properties(volume, targetip): - """Gets iscsi configuration - - We ideally get saved information in the volume entity, but fall back - to discovery if need be. Discovery may be completely removed in future - The properties are: - - :target_discovered: boolean indicating whether discovery was used - - :target_iqn: the IQN of the iSCSI target - - :target_portal: the portal of the iSCSI target - - :target_lun: the lun of the iSCSI target - - :volume_id: the id of the volume (currently used by xen) - - :auth_method:, :auth_username:, :auth_password: - - the authentication details. Right now, either auth_method is not - present meaning no authentication, or auth_method == `CHAP` - meaning use CHAP with the specified credentials. - - :access_mode: the volume access mode allow client used - ('rw' or 'ro' currently supported) - - :discard: boolean indicating if discard is supported - - In some of drivers that support multiple connections (for multipath - and for single path with failover on connection failure), it returns - :target_iqns, :target_portals, :target_luns, which contain lists of - multiple values. The main portal information is also returned in - :target_iqn, :target_portal, :target_lun for backward compatibility. - - Note that some of drivers don't return :target_portals even if they - support multipath. Then the connector should use sendtargets discovery - to find the other portals if it supports multipath. 
- """ - - properties = {} - - location = volume['provider_location'] - - if location: - # provider_location is the same format as iSCSI discovery output - properties['target_discovered'] = False - else: - location = _do_iscsi_discovery(volume, targetip) - - if not location: - msg = (_("Could not find iSCSI export for volume %s") - % (volume['name'])) - raise msg - - print("ISCSI Discovery: Found %s" % (location)) - properties['target_discovered'] = True - - results = location.split(" ") - portals = results[0].split(",")[0].split(";") - iqn = results[1] - nr_portals = len(portals) - - try: - lun = int(results[2]) - # TODO: Validate StoreVirtual LUN number is part of location details, - # after target IP - except (IndexError, ValueError): - lun = 0 - - if nr_portals > 1: - properties['target_portals'] = portals - properties['target_iqns'] = [iqn] * nr_portals - properties['target_luns'] = [lun] * nr_portals - properties['target_portal'] = portals[0] - properties['target_iqn'] = iqn - properties['target_lun'] = lun - - properties['volume_id'] = volume['id'] - - auth = volume['provider_auth'] - if auth: - (auth_method, auth_username, auth_secret) = auth.split() - - properties['auth_method'] = auth_method - properties['auth_username'] = auth_username - properties['auth_password'] = auth_secret - - geometry = volume.get('provider_geometry', None) - if geometry: - (physical_block_size, logical_block_size) = geometry.split() - properties['physical_block_size'] = physical_block_size - properties['logical_block_size'] = logical_block_size - - encryption_key_id = volume.get('encryption_key_id', None) - properties['encrypted'] = encryption_key_id is not None - - return properties diff --git a/hpedockerplugin/hpe/share.py b/hpedockerplugin/hpe/share.py new file mode 100644 index 00000000..14cd546d --- /dev/null +++ b/hpedockerplugin/hpe/share.py @@ -0,0 +1,23 @@ +DEFAULT_MOUNT_SHARE = "True" +MAX_SHARES_PER_FPG = 16 + + +def create_metadata(backend, cpg, fpg, share_name, size, 
+ readonly=False, nfs_options=None, comment='', + fsMode=None, fsOwner=None): + return { + 'id': None, + 'backend': backend, + 'cpg': cpg, + 'fpg': fpg, + 'vfs': None, + 'name': share_name, + 'size': size, + 'readonly': readonly, + 'nfsOptions': nfs_options, + 'protocol': 'nfs', + 'clientIPs': [], + 'comment': comment, + 'fsMode': fsMode, + 'fsOwner': fsOwner, + } diff --git a/hpedockerplugin/hpe/utils.py b/hpedockerplugin/hpe/utils.py index b5cfe293..1529e5ea 100644 --- a/hpedockerplugin/hpe/utils.py +++ b/hpedockerplugin/hpe/utils.py @@ -15,11 +15,18 @@ """Volume-related Utilities and helpers.""" import six +import string import uuid +import platform +from Crypto.Cipher import AES from Crypto.Random import random -from oslo_serialization import base64 +from oslo_log import log as logging +import base64 +from oslo_serialization import base64 as oslo_base64 + +LOG = logging.getLogger(__name__) # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol @@ -59,7 +66,7 @@ def generate_password(length=16, symbolgroups=DEFAULT_PASSWORD_SYMBOLS): def _encode_name(name): uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) - vol_encoded = base64.encode_as_text(vol_uuid.bytes) + vol_encoded = oslo_base64.encode_as_text(vol_uuid.bytes) # 3par doesn't allow +, nor / vol_encoded = vol_encoded.replace('+', '.') @@ -76,7 +83,7 @@ def _decode_name(name): name = name.replace('-', '/') name = name + "==" - vol_decoded = uuid.UUID(bytes=base64.decode_as_bytes(name)) + vol_decoded = uuid.UUID(bytes=oslo_base64.decode_as_bytes(name)) return str(vol_decoded) @@ -149,3 +156,61 @@ def get_3par_rcg_name(id): def get_remote3par_rcg_name(id, array_id): return get_3par_rcg_name(id) + ".r" + ( six.text_type(array_id)) + + +def is_host_os_rhel(): + platform_type = list(platform.linux_distribution()) + if 'Red Hat Enterprise Linux Server' in platform_type: + return True + else: + return False + + +class 
PasswordDecryptor(object): + def __init__(self, backend_name, etcd): + self._backend_name = backend_name + self._etcd = etcd + self._passphrase = self._get_passphrase() + + def _get_passphrase(self): + try: + passphrase = self._etcd.get_backend_key(self._backend_name) + return passphrase + except Exception as ex: + LOG.info('Exception occurred %s ' % six.text_type(ex)) + LOG.info("Using PLAIN TEXT for backend '%s'" % self._backend_name) + return None + + def decrypt_password(self, config): + if self._passphrase and config: + passphrase = self._key_check(self._passphrase) + config.hpe3par_password = \ + self._decrypt(config.hpe3par_password, passphrase) + config.san_password = \ + self._decrypt(config.san_password, passphrase) + + def _key_check(self, key): + KEY_LEN = len(key) + padding_string = string.ascii_letters + + KEY = key + if KEY_LEN < 16: + KEY = key + padding_string[:16 - KEY_LEN] + + elif KEY_LEN > 16 and KEY_LEN < 24: + KEY = key + padding_string[:24 - KEY_LEN] + + elif KEY_LEN > 24 and KEY_LEN < 32: + KEY = key + padding_string[:32 - KEY_LEN] + + elif KEY_LEN > 32: + KEY = key[:32] + else: + KEY = key + + return KEY + + def _decrypt(self, encrypted, passphrase): + aes = AES.new(passphrase, AES.MODE_CFB, '1234567812345678') + decrypt_pass = aes.decrypt(base64.b64decode(encrypted)) + return decrypt_pass.decode('utf-8') diff --git a/hpedockerplugin/hpe/vfs_ip_pool.py b/hpedockerplugin/hpe/vfs_ip_pool.py new file mode 100644 index 00000000..9bd6ee19 --- /dev/null +++ b/hpedockerplugin/hpe/vfs_ip_pool.py @@ -0,0 +1,96 @@ +from oslo_config import types +from oslo_log import log +import six + +from hpedockerplugin import exception + +LOG = log.getLogger(__name__) + + +class VfsIpPool(types.String, types.IPAddress): + """VfsIpPool type. + Used to represent VFS IP Pool for a single backend + Converts configuration value to an IP subnet dictionary + VfsIpPool value format:: + IP_address_1:SubnetA,IP_address_2-IP_address10:SubnetB,... 
+ IP address is of type types.IPAddress + Optionally doing range checking. + If value is whitespace or empty string will raise error + :param type_name: Type name to be used in the sample config file. + """ + + def __init__(self, type_name='VfsIpPool'): + types.String.__init__(self, type_name=type_name) + types.IPAddress.__init__(self, type_name=type_name) + + def _get_ips_for_range(self, begin_ip, end_ip): + ips = [] + ip_tokens = begin_ip.split('.') + range_lower = int(ip_tokens[-1]) + ip_tokens = end_ip.split('.') + range_upper = int(ip_tokens[-1]) + if range_lower > range_upper: + msg = "ERROR: Invalid IP range specified %s-%s!" %\ + (begin_ip, end_ip) + raise exception.InvalidInput(reason=msg) + elif range_lower == range_upper: + return [begin_ip] + + # Remove the last token + ip_tokens.pop(-1) + for host_num in range(range_lower, range_upper + 1): + ip = '.'.join(ip_tokens + [str(host_num)]) + ips.append(ip) + return ips + + def _validate_ip(self, ip): + ip = types.String.__call__(self, ip.strip()) + # Validate if the IP address is good + try: + types.IPAddress.__call__(self, ip) + except ValueError as val_err: + msg = "ERROR: Invalid IP address specified: %s" % ip + LOG.error(msg) + raise exception.InvalidInput(msg) + + def __call__(self, value): + + if value is None or value.strip(' ') is '': + message = ("ERROR: Invalid configuration. " + "'hpe3par_server_ip_pool' must be set in the format " + "'IP1:Subnet1,IP2:Subnet2...,IP3-IP5:Subnet3'. 
Check " + "help for usage") + LOG.error(message) + raise exception.InvalidInput(err=message) + + values = value.split(",") + + # ip-subnet-dict = {subnet: set([ip-list])} + ip_subnet_dict = {} + for value in values: + if '-' in value: + ip_range, subnet = value.split(':') + begin_ip, end_ip = ip_range.split('-') + self._validate_ip(begin_ip) + self._validate_ip(end_ip) + self._validate_ip(subnet) + ips = self._get_ips_for_range(begin_ip, end_ip) + else: + ip, subnet = value.split(':') + self._validate_ip(ip) + self._validate_ip(subnet) + ips = [ip] + + ip_set = ip_subnet_dict.get(subnet) + if ip_set: + ip_set.update(ips) + else: + # Keeping it as set to avoid duplicates + ip_subnet_dict[subnet] = set(ips) + return ip_subnet_dict + + def __repr__(self): + return 'VfsIpPool' + + def _formatter(self, value): + return six.text_type(value) diff --git a/hpedockerplugin/hpe/volume.py b/hpedockerplugin/hpe/volume.py index 2c2a8bd4..d9d255ae 100644 --- a/hpedockerplugin/hpe/volume.py +++ b/hpedockerplugin/hpe/volume.py @@ -1,4 +1,5 @@ import uuid +from hpedockerplugin.hpe import utils DEFAULT_SIZE = 100 DEFAULT_PROV = "thin" @@ -11,6 +12,7 @@ DEFAULT_SCHEDULE = False QOS_PRIORITY = {1: 'Low', 2: 'Normal', 3: 'High'} +RCG_ROLE = {1: 'Primary', 2: 'Secondary'} PROVISIONING = {1: 'full', 2: 'thin', 6: 'dedup'} COMPRESSION = {1: 'true'} COPYTYPE = {1: 'base', 2: 'physical', 3: 'virtual'} @@ -25,6 +27,8 @@ def createvol(name, size=DEFAULT_SIZE, prov=DEFAULT_PROV, volume = {} volume['id'] = str(uuid.uuid4()) volume['name'] = volume['id'] + volume['3par_vol_name'] = utils.get_3par_name(volume['id'], + is_snap) volume['host'] = '' volume['size'] = size volume['availability_zone'] = '' diff --git a/hpedockerplugin/hpe_plugin_service.py b/hpedockerplugin/hpe_plugin_service.py index 42b9b1a3..17fa65d1 100644 --- a/hpedockerplugin/hpe_plugin_service.py +++ b/hpedockerplugin/hpe_plugin_service.py @@ -129,6 +129,31 @@ def setupservice(self): LOG.error(msg) raise 
exception.HPEPluginStartPluginException(reason=msg) + file_driver = 'hpedockerplugin.hpe.hpe_3par_file.HPE3PARFileDriver' + fc_driver = 'hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver' + iscsi_driver = 'hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver' + # backend_configs -> {'backend1': config1, 'backend2': config2, ...} + # all_configs -> {'block': backend_configs1, 'file': backend_configs2} + file_configs = {} + block_configs = {} + all_configs = {} + for backend_name, config in backend_configs.items(): + configured_driver = config.hpedockerplugin_driver.strip() + if configured_driver == file_driver: + file_configs[backend_name] = config + elif configured_driver == fc_driver or \ + configured_driver == iscsi_driver: + block_configs[backend_name] = config + else: + msg = "Bad driver name specified in hpe.conf: %s" %\ + configured_driver + raise exception.HPEPluginStartPluginException(reason=msg) + + if file_configs: + all_configs['file'] = (host_config, file_configs) + if block_configs: + all_configs['block'] = (host_config, block_configs) + # Set Logging level logging_level = backend_configs['DEFAULT'].logging setupcfg.setup_logging('hpe_storage_api', logging_level) @@ -137,8 +162,7 @@ def setupservice(self): endpoint = serverFromString(self._reactor, "unix:{}:mode=600". 
format(PLUGIN_PATH.path)) servicename = StreamServerEndpointService(endpoint, Site( - VolumePlugin(self._reactor, host_config, - backend_configs).app.resource())) + VolumePlugin(self._reactor, all_configs).app.resource())) return servicename diff --git a/hpedockerplugin/hpe_storage_api.py b/hpedockerplugin/hpe_storage_api.py index 26eae958..990ea536 100644 --- a/hpedockerplugin/hpe_storage_api.py +++ b/hpedockerplugin/hpe_storage_api.py @@ -19,7 +19,7 @@ """ import json import six -import re +import datetime from oslo_log import log as logging @@ -27,13 +27,17 @@ from hpedockerplugin.i18n import _, _LE, _LI from klein import Klein from hpedockerplugin.hpe import volume +from ratelimit import limits +from ratelimit.exception import RateLimitException +from backoff import on_exception, expo import hpedockerplugin.backend_orchestrator as orchestrator +import hpedockerplugin.request_validator as req_validator +import hpedockerplugin.file_backend_orchestrator as f_orchestrator +import hpedockerplugin.request_router as req_router LOG = logging.getLogger(__name__) -DEFAULT_BACKEND_NAME = "DEFAULT" - class VolumePlugin(object): """ @@ -42,7 +46,7 @@ class VolumePlugin(object): """ app = Klein() - def __init__(self, reactor, host_config, backend_configs): + def __init__(self, reactor, all_configs): """ :param IReactorTime reactor: Reactor time interface implementation. 
:param Ihpepluginconfig : hpedefaultconfig configuration @@ -50,13 +54,81 @@ def __init__(self, reactor, host_config, backend_configs): LOG.info(_LI('Initialize Volume Plugin')) self._reactor = reactor - self._host_config = host_config - self._backend_configs = backend_configs + self.orchestrator = None + if 'block' in all_configs: + block_configs = all_configs['block'] + self._host_config = block_configs[0] + self._backend_configs = block_configs[1] + if 'DEFAULT' in self._backend_configs: + self._def_backend_name = 'DEFAULT' + elif 'DEFAULT_BLOCK' in self._backend_configs: + self._def_backend_name = 'DEFAULT_BLOCK' + else: + msg = "ERROR: DEFAULT backend is not present for the BLOCK " \ + "driver configuration. If DEFAULT backend has been " \ + "configured for FILE driver, then DEFAULT_BLOCK " \ + "backend MUST be configured for BLOCK driver in " \ + "hpe.conf file." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if 'DEFAULT_FILE' in self._backend_configs: + msg = "ERROR: 'DEFAULT_FILE' backend cannot be defined " \ + "for BLOCK driver." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self.orchestrator = orchestrator.VolumeBackendOrchestrator( + self._host_config, self._backend_configs, + self._def_backend_name) + self._req_validator = req_validator.RequestValidator( + self._backend_configs) + + self._file_orchestrator = None + if 'file' in all_configs: + file_configs = all_configs['file'] + self._f_host_config = file_configs[0] + self._f_backend_configs = file_configs[1] + + if 'DEFAULT' in self._f_backend_configs: + self._f_def_backend_name = 'DEFAULT' + elif 'DEFAULT_FILE' in self._f_backend_configs: + self._f_def_backend_name = 'DEFAULT_FILE' + else: + msg = "ERROR: DEFAULT backend is not present for the FILE " \ + "driver configuration. If DEFAULT backend has been " \ + "configured for BLOCK driver, then DEFAULT_FILE " \ + "backend MUST be configured for FILE driver in " \ + "hpe.conf file." 
+ raise exception.InvalidInput(reason=msg) + + if 'DEFAULT_BLOCK' in self._f_backend_configs: + msg = "ERROR: 'DEFAULT_BLOCK' backend cannot be defined " \ + "for FILE driver." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + self._file_orchestrator = f_orchestrator.FileBackendOrchestrator( + self._f_host_config, self._f_backend_configs, + self._f_def_backend_name) + + self._req_router = req_router.RequestRouter( + vol_orchestrator=self.orchestrator, + file_orchestrator=self._file_orchestrator, + all_configs=all_configs) - # TODO: make device_scan_attempts configurable - # see nova/virt/libvirt/volume/iscsi.py - self.orchestrator = orchestrator.Orchestrator(host_config, - backend_configs) + def is_backend_initialized(self, backend_name): + if (backend_name not in self._backend_configs and + backend_name not in self._f_backend_configs): + return 'FAILED' + if backend_name in self.orchestrator._manager: + mgr_obj = self.orchestrator._manager[backend_name] + return mgr_obj.get('backend_state') + + if backend_name in self._file_orchestrator._manager: + mgr_obj = self._file_orchestrator._manager[backend_name] + return mgr_obj.get('backend_state') + + return 'INITIALIZING' def disconnect_volume_callback(self, connector_info): LOG.info(_LI('In disconnect_volume_callback: connector info is %s'), @@ -74,6 +146,8 @@ def plugin_activate(self, ignore_body=True): LOG.info(_LI('In Plugin Activate')) return json.dumps({u"Implements": [u"VolumeDriver"]}) + @on_exception(expo, RateLimitException, max_tries=8) + @limits(calls=25, period=30) @app.route("/VolumeDriver.Remove", methods=["POST"]) def volumedriver_remove(self, name): """ @@ -84,10 +158,36 @@ def volumedriver_remove(self, name): :return: Result indicating success. 
""" contents = json.loads(name.content.getvalue()) - volname = contents['Name'] + name = contents['Name'] - return self.orchestrator.volumedriver_remove(volname) + LOG.info("Routing remove request...") + try: + return self._req_router.route_remove_request(name) + # If share is not found by this name, allow volume driver + # to handle the request by passing the except clause + except exception.EtcdMetadataNotFound: + pass + except exception.PluginException as ex: + return json.dumps({"Err": ex.msg}) + except Exception as ex: + msg = six.text_type(ex) + LOG.error(msg) + return json.dumps({"Err": msg}) + if self.orchestrator: + try: + return self.orchestrator.volumedriver_remove(name) + except exception.PluginException as ex: + return json.dumps({"Err": ex.msg}) + except Exception as ex: + msg = six.text_type(ex) + LOG.error(msg) + return json.dumps({"Err": msg}) + + return json.dumps({"Err": ""}) + + @on_exception(expo, RateLimitException, max_tries=8) + @limits(calls=25, period=30) @app.route("/VolumeDriver.Unmount", methods=["POST"]) def volumedriver_unmount(self, name): """ @@ -104,20 +204,26 @@ def volumedriver_unmount(self, name): volname = contents['Name'] vol_mount = volume.DEFAULT_MOUNT_VOLUME - if ('Opts' in contents and contents['Opts'] and - 'mount-volume' in contents['Opts']): - vol_mount = str(contents['Opts']['mount-volume']) mount_id = contents['ID'] - return self.orchestrator.volumedriver_unmount(volname, - vol_mount, mount_id) + + try: + return self._req_router.route_unmount_request(volname, mount_id) + except exception.EtcdMetadataNotFound: + pass + + if self.orchestrator: + return self.orchestrator.volumedriver_unmount( + volname, vol_mount, mount_id) + return json.dumps({"Err": "Unmount failed: volume/file '%s' not found" + % volname}) @app.route("/VolumeDriver.Create", methods=["POST"]) - def volumedriver_create(self, name, opts=None): + def volumedriver_create(self, request, opts=None): """ Create a volume with the given name. 
- :param unicode name: The name of the volume. + :param unicode request: Request data :param dict opts: Options passed from Docker for the volume at creation. ``None`` if not supplied in the request body. Currently ignored. ``Opts`` is a parameter introduced in the @@ -126,19 +232,44 @@ def volumedriver_create(self, name, opts=None): :return: Result indicating success. """ - contents = json.loads(name.content.getvalue()) + contents = json.loads(request.content.getvalue()) if 'Name' not in contents: msg = (_('create volume failed, error is: Name is required.')) LOG.error(msg) raise exception.HPEPluginCreateException(reason=msg) - volname = contents['Name'] - is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", volname) - if not is_valid_name: - msg = 'Invalid volume name: %s is passed.' % volname - LOG.debug(msg) - response = json.dumps({u"Err": msg}) - return response + name = contents['Name'] + + if ((self.orchestrator and + self.orchestrator.volume_exists(name)) or + (self._file_orchestrator and + self._file_orchestrator.share_exists(name))): + return json.dumps({'Err': ''}) + + # Try to handle this as file persona operation + if 'Opts' in contents and contents['Opts']: + if 'filePersona' in contents['Opts']: + try: + return self._req_router.route_create_request( + name, contents, self._file_orchestrator + ) + except exception.PluginException as ex: + LOG.error(six.text_type(ex)) + return json.dumps({'Err': ex.msg}) + except Exception as ex: + LOG.error(six.text_type(ex)) + return json.dumps({'Err': six.text_type(ex)}) + + if not self.orchestrator: + return json.dumps({"Err": "ERROR: Cannot create volume '%s'. 
" + "Volume driver is not configured" % + name}) + + # Continue with volume creation operations + try: + self._req_validator.validate_request(contents) + except exception.InvalidInput as ex: + return json.dumps({"Err": ex.msg}) vol_size = volume.DEFAULT_SIZE vol_prov = volume.DEFAULT_PROV @@ -153,22 +284,24 @@ def volumedriver_create(self, name, opts=None): snap_cpg = None rcg_name = None - current_backend = DEFAULT_BACKEND_NAME + current_backend = self._def_backend_name if 'Opts' in contents and contents['Opts']: # Verify valid Opts arguments. - valid_volume_create_opts = ['mount-volume', 'compression', - 'size', 'provisioning', 'flash-cache', - 'cloneOf', 'virtualCopyOf', - 'expirationHours', 'retentionHours', - 'qos-name', 'fsOwner', 'fsMode', - 'mountConflictDelay', - 'help', 'importVol', 'cpg', - 'snapcpg', 'scheduleName', - 'scheduleFrequency', 'snapshotPrefix', - 'expHrs', 'retHrs', 'backend', - 'replicationGroup'] + valid_volume_create_opts = [ + 'compression', 'size', 'provisioning', 'flash-cache', + 'cloneOf', 'virtualCopyOf', 'expirationHours', + 'retentionHours', 'qos-name', 'fsOwner', 'fsMode', + 'mountConflictDelay', 'help', 'importVol', 'cpg', + 'snapcpg', 'scheduleName', 'scheduleFrequency', + 'snapshotPrefix', 'expHrs', 'retHrs', 'backend', + 'replicationGroup', 'manager' + ] valid_snap_schedule_opts = ['scheduleName', 'scheduleFrequency', 'snapshotPrefix', 'expHrs', 'retHrs'] + mutually_exclusive = [ + ['virtualCopyOf', 'cloneOf', 'qos-name', 'replicationGroup'], + ['virtualCopyOf', 'cloneOf', 'backend'] + ] for key in contents['Opts']: if key not in valid_volume_create_opts: msg = (_('create volume/snapshot/clone failed, error is: ' @@ -180,66 +313,80 @@ def volumedriver_create(self, name, opts=None): return json.dumps({u"Err": six.text_type(msg)}) # mutually exclusive options check - mutually_exclusive_list = ['virtualCopyOf', 'cloneOf', 'qos-name', - 'replicationGroup'] input_list = list(contents['Opts'].keys()) - if (len(list(set(input_list) & 
- set(mutually_exclusive_list))) >= 2): - msg = (_('%(exclusive)s cannot be specified at the same ' - 'time') % {'exclusive': mutually_exclusive_list, }) - LOG.error(msg) - return json.dumps({u"Err": six.text_type(msg)}) + for li in mutually_exclusive: + if (len(list(set(input_list) & set(li))) >= 2): + msg = (_('%(exclusive)s cannot be specified at the same ' + 'time') % {'exclusive': li, }) + LOG.error(msg) + return json.dumps({u"Err": six.text_type(msg)}) if ('backend' in contents['Opts'] and contents['Opts']['backend'] != ""): current_backend = str(contents['Opts']['backend']) - - if 'importVol' in input_list: - if not len(input_list) == 1: - if len(input_list) == 2 and 'backend' in input_list: - pass - else: - msg = (_('%(input_list)s cannot be ' - ' specified at the same ' - 'time') % {'input_list': input_list, }) + if current_backend == 'DEFAULT_FILE': + msg = 'Backend DEFAULT_FILE is reserved for File ' \ + 'Persona. Cannot specify it for Block operations' + LOG.error(msg) + return json.dumps({'Err': msg}) + + # check if current_backend present in config file + if current_backend in self._backend_configs: + # check if current_backend is initialised + if current_backend not in self.orchestrator._manager: + msg = 'Backend: %s having incorrect/missing some ' \ + 'configuration.' % current_backend LOG.error(msg) - return json.dumps({u"Err": six.text_type(msg)}) + return json.dumps({u"Err": msg}) + else: + msg = 'Backend: %s not present in config.' 
\ + % current_backend + LOG.error(msg) + return json.dumps({u"Err": msg}) + if 'importVol' in input_list: existing_ref = str(contents['Opts']['importVol']) - return self.orchestrator.manage_existing(volname, + return self.orchestrator.manage_existing(name, existing_ref, - current_backend) + current_backend, + contents['Opts']) if 'help' in contents['Opts']: - create_help_path = "./config/create_help.txt" - create_help_file = open(create_help_path, "r") - create_help_content = create_help_file.read() - create_help_file.close() - LOG.error(create_help_content) - return json.dumps({u"Err": create_help_content}) + return self._process_help(contents['Opts']['help']) # Populating the values if ('size' in contents['Opts'] and contents['Opts']['size'] != ""): vol_size = int(contents['Opts']['size']) + if vol_size == 0: + msg = ("Please enter the valid integer value for size \ + parameter") + LOG.error(msg) + return json.dumps({u'Err': six.text_type(msg)}) if ('provisioning' in contents['Opts'] and contents['Opts']['provisioning'] != ""): vol_prov = str(contents['Opts']['provisioning']) - if ('compression' in contents['Opts'] and - contents['Opts']['compression'] != ""): - compression_val = str(contents['Opts']['compression']) + if 'compression' in contents['Opts']: + compression_val = str(contents['Opts'].get('compression')) if compression_val is not None: if compression_val.lower() not in valid_bool_opts: msg = \ - _('create volume failed, error is:' + _('create volume failed, error is: ' 'passed compression parameter' - ' do not have a valid value. ' - 'Valid vaues are: %(valid)s') % { + ' does not have a valid value. ' + 'Valid values are: %(valid)s') % { 'valid': valid_bool_opts} LOG.error(msg) return json.dumps({u'Err': six.text_type(msg)}) + else: + msg = \ + _('parameter compression passed without a value. 
' + 'Valid values are: %(valid)s') % { + 'valid': valid_bool_opts} + LOG.error(msg) + return json.dumps({u'Err': six.text_type(msg)}) if ('flash-cache' in contents['Opts'] and contents['Opts']['flash-cache'] != ""): @@ -250,7 +397,7 @@ def volumedriver_create(self, name, opts=None): _('create volume failed, error is:' 'passed flash-cache parameter' ' do not have a valid value. ' - 'Valid vaues are: %(valid)s') % { + 'Valid values are: %(valid)s') % { 'valid': valid_bool_opts} LOG.error(msg) return json.dumps({u'Err': six.text_type(msg)}) @@ -270,7 +417,9 @@ def volumedriver_create(self, name, opts=None): contents['Opts']['fsOwner'] != ""): fs_owner = contents['Opts']['fsOwner'] try: - mode = fs_owner.split(':') + uid, gid = fs_owner.split(':') + int(uid) + int(gid) except ValueError as ex: return json.dumps({'Err': "Invalid value '%s' specified " "for fsOwner. Please " @@ -328,11 +477,24 @@ def volumedriver_create(self, name, opts=None): LOG.error(msg) response = json.dumps({u"Err": msg}) return response - return self.volumedriver_create_snapshot(name, + schedule_opts = valid_snap_schedule_opts[1:] + for s_o in schedule_opts: + if s_o in input_list: + if "scheduleName" not in input_list: + msg = (_('scheduleName is a mandatory parameter' + ' for creating a snapshot schedule')) + LOG.error(msg) + response = json.dumps({u"Err": msg}) + return response + break + return self.volumedriver_create_snapshot(request, mount_conflict_delay, opts) elif 'cloneOf' in contents['Opts']: - return self.volumedriver_clone_volume(name, opts) + LOG.info('hpe_storage_api: clone options : %s' % + contents['Opts']) + return self.volumedriver_clone_volume(request, + contents['Opts']) for i in input_list: if i in valid_snap_schedule_opts: if 'virtualCopyOf' not in input_list: @@ -343,12 +505,22 @@ def volumedriver_create(self, name, opts=None): return response rcg_name = contents['Opts'].get('replicationGroup', None) - try: - self._validate_rcg_params(rcg_name, current_backend) - except 
exception.InvalidInput as ex: - return json.dumps({u"Err": ex.msg}) - return self.orchestrator.volumedriver_create(volname, vol_size, + if (cpg and rcg_name) or (snap_cpg and rcg_name): + msg = "cpg/snap_cpg and replicationGroup options cannot be " \ + "specified together" + return json.dumps({u"Err": msg}) + + # It is possible that the user configured replication in hpe.conf + # but didn't specify any options. In that case too, this operation + # must fail asking for "replicationGroup" parameter + # Hence this validation must be done whether "Opts" is there or not + try: + self._validate_rcg_params(rcg_name, current_backend) + except exception.InvalidInput as ex: + return json.dumps({u"Err": ex.msg}) + + return self.orchestrator.volumedriver_create(name, vol_size, vol_prov, vol_flash, compression_val, @@ -359,6 +531,28 @@ def volumedriver_create(self, name, opts=None): current_backend, rcg_name) + def _process_help(self, help): + LOG.info("Working on help content generation...") + if help == 'backends': + + line = "=" * 54 + spaces = ' ' * 42 + resp = "\n%s\nNAME%sSTATUS\n%s\n" % (line, spaces, line) + + printable_len = 45 + for k, v in self.orchestrator._manager.items(): + backend_state = v['backend_state'] + padding = (printable_len - len(k)) * ' ' + resp += "%s%s %s\n" % (k, padding, backend_state) + return json.dumps({u'Err': resp}) + else: + create_help_path = "./config/create_help.txt" + create_help_file = open(create_help_path, "r") + create_help_content = create_help_file.read() + create_help_file.close() + LOG.error(create_help_content) + return json.dumps({u"Err": create_help_content}) + def _validate_rcg_params(self, rcg_name, backend_name): LOG.info("Validating RCG: %s, backend name: %s..." 
% (rcg_name, backend_name)) @@ -369,16 +563,17 @@ def _validate_rcg_params(self, rcg_name, backend_name): if rcg_name and not replication_device: msg = "Request to create replicated volume cannot be fulfilled " \ - "without defining 'replication_device' entry in " \ - "hpe.conf for the desired or default backend. " \ - "Please add it and execute the request again." + "without defining 'replication_device' entry defined in " \ + "hpe.conf for the backend '%s'. Please add it and execute " \ + "the request again." % backend_name raise exception.InvalidInput(reason=msg) if replication_device and not rcg_name: - msg = "Request to create replicated volume cannot be fulfilled " \ - "without specifying 'rcg_name' parameter in the request. " \ - "Please specify 'rcg_name' and execute the request again." - raise exception.InvalidInput(reason=msg) + LOG.info("'%s' is a replication enabled backend. " + "'replicationGroup' is not specified in the create " + "volume command. Proceeding to create a regular " + "volume without remote copy " + "capabilities." % (backend_name)) if rcg_name and replication_device: @@ -407,10 +602,11 @@ def _check_valid_replication_mode(mode): if (rep_mode == 'asynchronous' or rep_mode == 'streaming')\ and sync_period: try: - sync_period = int(replication_device.sync_period) + sync_period = int(sync_period) except ValueError as ex: msg = "Non-integer value '%s' not allowed for " \ - "'sync_period'" % replication_device.sync_period + "'sync_period'. 
%s" % ( + replication_device.sync_period, ex) raise exception.InvalidInput(reason=msg) else: SYNC_PERIOD_LOW = 300 @@ -430,7 +626,7 @@ def _check_schedule_frequency(self, schedFrequency): LOG.error(msg) raise exception.HPEPluginCreateException(reason=msg) - def volumedriver_clone_volume(self, name, opts=None): + def volumedriver_clone_volume(self, name, clone_opts=None): # Repeating the validation here in anticipation that when # actual REST call for clone is added, this # function will have minimal impact @@ -455,9 +651,11 @@ def volumedriver_clone_volume(self, name, opts=None): src_vol_name = str(contents['Opts']['cloneOf']) clone_name = contents['Name'] + LOG.info('hpe_storage_api - volumedriver_clone_volume ' + 'clone_options 1 : %s ' % clone_opts) return self.orchestrator.clone_volume(src_vol_name, clone_name, size, - cpg, snap_cpg) + cpg, snap_cpg, clone_opts) def volumedriver_create_snapshot(self, name, mount_conflict_delay, opts=None): @@ -491,13 +689,6 @@ def volumedriver_create_snapshot(self, name, mount_conflict_delay, if 'Opts' in contents and contents['Opts'] and \ 'expirationHours' in contents['Opts']: expiration_hrs = int(contents['Opts']['expirationHours']) - if has_schedule: - msg = ('create schedule failed, error is: setting ' - 'expiration_hours for docker snapshot is not' - ' allowed while creating a schedule.') - LOG.error(msg) - response = json.dumps({'Err': msg}) - return response retention_hrs = None if 'Opts' in contents and contents['Opts'] and \ @@ -505,6 +696,15 @@ def volumedriver_create_snapshot(self, name, mount_conflict_delay, retention_hrs = int(contents['Opts']['retentionHours']) if has_schedule: + if 'expirationHours' in contents['Opts'] or \ + 'retentionHours' in contents['Opts']: + msg = ('create schedule failed, error is : setting ' + 'expirationHours or retentionHours for docker base ' + 'snapshot is not allowed while creating a schedule') + LOG.error(msg) + response = json.dumps({'Err': msg}) + return response + if 
'scheduleFrequency' not in contents['Opts']: msg = ('create schedule failed, error is: user ' 'has not passed scheduleFrequency to create' @@ -521,7 +721,7 @@ def volumedriver_create_snapshot(self, name, mount_conflict_delay, if exphrs is not None: if rethrs > exphrs: msg = ('create schedule failed, error is: ' - 'expiration hours cannot be greater than ' + 'expiration hours must be greater than ' 'retention hours') LOG.error(msg) response = json.dumps({'Err': msg}) @@ -550,12 +750,15 @@ def volumedriver_create_snapshot(self, name, mount_conflict_delay, response = json.dumps({'Err': msg}) return response schedName = str(contents['Opts']['scheduleName']) + if schedName == "auto": + schedName = self.generate_schedule_with_timestamp() + snapPrefix = str(contents['Opts']['snapshotPrefix']) schedNameLength = len(schedName) snapPrefixLength = len(snapPrefix) if schedNameLength > 31 or snapPrefixLength > 15: - msg = ('Please provide a schedlueName with max 31 ' + msg = ('Please provide a scheduleName with max 31 ' 'characters and snapshotPrefix with max ' 'length of 15 characters') LOG.error(msg) @@ -577,6 +780,18 @@ def volumedriver_create_snapshot(self, name, mount_conflict_delay, has_schedule, schedFrequency) + def generate_schedule_with_timestamp(self): + current_time = datetime.datetime.now() + current_time_str = str(current_time) + space_replaced = current_time_str.replace(' ', '_') + colon_replaced = space_replaced.replace(':', '_') + hypen_replaced = colon_replaced.replace('-', '_') + scheduleNameGenerated = hypen_replaced + LOG.info(' Schedule Name auto generated is %s' % scheduleNameGenerated) + return scheduleNameGenerated + + @on_exception(expo, RateLimitException, max_tries=8) + @limits(calls=25, period=30) @app.route("/VolumeDriver.Mount", methods=["POST"]) def volumedriver_mount(self, name): """ @@ -598,16 +813,25 @@ def volumedriver_mount(self, name): volname = contents['Name'] vol_mount = volume.DEFAULT_MOUNT_VOLUME - if ('Opts' in contents and 
contents['Opts'] and - 'mount-volume' in contents['Opts']): - vol_mount = str(contents['Opts']['mount-volume']) mount_id = contents['ID'] try: - return self.orchestrator.mount_volume(volname, vol_mount, mount_id) - except Exception as ex: - return {'Err': six.text_type(ex)} + return self._req_router.route_mount_request(volname, mount_id) + except exception.EtcdMetadataNotFound: + pass + + if self.orchestrator: + try: + return self.orchestrator.mount_volume(volname, + vol_mount, + mount_id) + except Exception as ex: + return json.dumps({'Err': six.text_type(ex)}) + + return json.dumps({"Err": "ERROR: Cannot mount volume '%s'. " + "Volume driver is not configured" % + volname}) @app.route("/VolumeDriver.Path", methods=["POST"]) def volumedriver_path(self, name): @@ -621,7 +845,15 @@ def volumedriver_path(self, name): contents = json.loads(name.content.getvalue()) volname = contents['Name'] - return self.orchestrator.get_path(volname) + try: + return self._req_router.route_get_path_request(volname) + except exception.EtcdMetadataNotFound: + pass + + if self.orchestrator: + return self.orchestrator.get_path(volname) + + return json.dumps({u"Err": '', u"Mountpoint": ''}) @app.route("/VolumeDriver.Capabilities", methods=["POST"]) def volumedriver_getCapabilities(self, body): @@ -655,8 +887,18 @@ def volumedriver_get(self, name): if token_cnt == 2: snapname = tokens[1] - return self.orchestrator.get_volume_snap_details(volname, snapname, - qualified_name) + # Check if share exists by this name. 
If so return its details + # else allow volume driver to process the request + try: + return self._req_router.get_object_details(volname) + except exception.EtcdMetadataNotFound: + pass + + if self.orchestrator: + return self.orchestrator.get_volume_snap_details(volname, + snapname, + qualified_name) + return json.dumps({u"Err": ''}) @app.route("/VolumeDriver.List", methods=["POST"]) def volumedriver_list(self, body): @@ -667,4 +909,14 @@ def volumedriver_list(self, body): :return: Result indicating success. """ - return self.orchestrator.volumedriver_list() + share_list = self._req_router.list_objects() + + volume_list = [] + if self.orchestrator: + volume_list = self.orchestrator.volumedriver_list() + + final_list = share_list + volume_list + if not final_list: + return json.dumps({u"Err": ''}) + + return json.dumps({u"Err": '', u"Volumes": final_list}) diff --git a/hpedockerplugin/request_context.py b/hpedockerplugin/request_context.py new file mode 100644 index 00000000..0c517d2c --- /dev/null +++ b/hpedockerplugin/request_context.py @@ -0,0 +1,716 @@ +import abc +import json +import re +from collections import OrderedDict + +from oslo_log import log as logging + +import hpedockerplugin.exception as exception +from hpedockerplugin.hpe import share + +LOG = logging.getLogger(__name__) + + +class RequestContextBuilderFactory(object): + def __init__(self, all_configs): + self._all_configs = all_configs + + # if 'block' in all_configs: + # block_configs = all_configs['block'] + # backend_configs = block_configs[1] + # self._vol_req_ctxt_creator = VolumeRequestContextBuilder( + # backend_configs) + # else: + # self._vol_req_ctxt_creator = NullRequestContextBuilder( + # "ERROR: Volume driver not enabled. 
Please provide hpe.conf " + # "file to enable it") + + if 'file' in all_configs: + file_configs = all_configs['file'] + f_backend_configs = file_configs[1] + self._file_req_ctxt_builder = FileRequestContextBuilder( + f_backend_configs) + else: + self._file_req_ctxt_builder = NullRequestContextBuilder( + "ERROR: File driver not enabled. Please provide hpe_file.conf " + "file to enable it") + + def get_request_context_builder(self): + return self._file_req_ctxt_builder + + +class NullRequestContextBuilder(object): + def __init__(self, msg): + self._msg = msg + + def build_request_context(self, contents, def_backend_name): + raise exception.InvalidInput(self._msg) + + +class RequestContextBuilder(object): + def __init__(self, backend_configs): + self._backend_configs = backend_configs + + def build_request_context(self, contents, def_backend_name): + LOG.info("build_request_context: Entering...") + self._validate_name(contents['Name']) + + req_ctxt_map = self._get_build_req_ctxt_map() + + if 'Opts' in contents and contents['Opts']: + # self._validate_mutually_exclusive_ops(contents) + self._validate_dependent_opts(contents) + + for op_name, req_ctxt_creator in req_ctxt_map.items(): + op_name = op_name.split(',') + found = not (set(op_name) - set(contents['Opts'].keys())) + if found: + return req_ctxt_creator(contents, def_backend_name) + return self._default_req_ctxt_creator(contents) + + @staticmethod + def _validate_name(vol_name): + is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name) + if not is_valid_name: + msg = 'Invalid volume name: %s is passed.' % vol_name + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _get_int_option(options, option_name, default_val): + opt = options.get(option_name) + if opt and opt != '': + try: + opt = int(opt) + except ValueError as ex: + msg = "ERROR: Invalid value '%s' specified for '%s' option. " \ + "Please specify an integer value." 
% (opt, option_name) + LOG.error(msg) + raise exception.InvalidInput(msg) + else: + opt = default_val + return opt + + # This method does the following: + # 1. Option specified + # - Some value: + # -- return if valid value else exception + # - Blank value: + # -- Return default if provided + # ELSE + # -- Throw exception if value_unset_exception is set + # 2. Option NOT specified + # - Return default value + @staticmethod + def _get_str_option(options, option_name, default_val, valid_values=None, + value_unset_exception=False): + opt = options.get(option_name) + if opt: + if opt != '': + opt = str(opt) + if valid_values and opt.lower() not in valid_values: + msg = "ERROR: Invalid value '%s' specified for '%s'" \ + "option. Valid values are: %s" %\ + (opt, option_name, valid_values) + LOG.error(msg) + raise exception.InvalidInput(msg) + + return opt + + if default_val: + return default_val + + if value_unset_exception: + return json.dumps({ + 'Err': "Value not set for option: %s" % opt + }) + return default_val + + def _validate_dependent_opts(self, contents): + pass + + # To be implemented by derived class + @abc.abstractmethod + def _get_build_req_ctxt_map(self): + pass + + def _default_req_ctxt_creator(self, contents): + pass + + @staticmethod + def _validate_mutually_exclusive_ops(contents): + mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol', + 'replicationGroup'] + if 'Opts' in contents and contents['Opts']: + received_opts = contents.get('Opts').keys() + diff = set(mutually_exclusive_ops) - set(received_opts) + if len(diff) < len(mutually_exclusive_ops) - 1: + mutually_exclusive_ops.sort() + msg = "Operations %s are mutually exclusive and cannot be " \ + "specified together. Please check help for usage." 
% \ + mutually_exclusive_ops + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _check_valid_fsMode_string(value): + valid_type = ['A', 'D', 'U', 'L'] + valid_flag = ['f', 'd', 'p', 'i', 'S', 'F', 'g'] + valid_perm1 = ['r', 'w', 'a', 'x', 'd', 'D', 't', 'T'] + valid_perm2 = ['n', 'N', 'c', 'C', 'o', 'y'] + valid_perm = valid_perm1 + valid_perm2 + type_flag_perm = value.split(':') + if len(type_flag_perm) != 3: + msg = "Incorrect value passed , please check correct "\ + "format and values to be passed in help" + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + vtype = type_flag_perm[0] + if vtype not in valid_type: + msg = "Incorrect value passed for type of a mode, please check "\ + "correct format and values to be passed." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + passed_vflag_len = len(list(type_flag_perm[1])) + vflag = list(set(list(type_flag_perm[1]))) + if len(vflag) < passed_vflag_len: + msg = "Duplicate characters for given flag are passed. "\ + "Please correct the passed flag characters for fsMode." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if set(vflag) - set(valid_flag): + msg = "Invalid flag passed for the fsMode. Please "\ + "pass the correct flag characters" + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + passed_vperm_len = len(list(type_flag_perm[2])) + vperm = list(set(list(type_flag_perm[2]))) + if len(vperm) < passed_vperm_len: + msg = "Duplicate characters for given permission are passed. "\ + "Please correct the passed permissions for fsMode." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + if set(vperm) - set(valid_perm): + msg = "Invalid characters for the permissions of fsMode are "\ + "passed. Please remove the invalid characters." 
+ LOG.error(msg) + raise exception.InvalidInput(reason=msg) + return True + + def _check_is_valid_acl_string(self, fsMode): + fsMode_list = fsMode.split(',') + if len(fsMode_list) != 3: + msg = "Passed acl string is not valid. "\ + "Pass correct acl string." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + for value in fsMode_list: + self._check_valid_fsMode_string(value) + return True + + @staticmethod + def _is_valid_octal_num(fsMode): + return re.match('^0[0-7]{3}$', fsMode) + + def _validate_fsMode(self, fsMode): + is_valid_fs_mode = True + if ':' in fsMode: + is_valid_fs_mode = self._check_is_valid_acl_string(fsMode) + else: + is_valid_fs_mode = self._is_valid_octal_num(fsMode) + if not is_valid_fs_mode: + msg = "Invalid value passed for the fsMode." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _validate_fsOwner(fsOwner): + fsOwner_list = fsOwner.split(':') + if len(fsOwner_list) != 2: + msg = "Invalid value specified for fsOwner Option." + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + try: + for val in fsOwner_list: + int(val) + except ValueError as ex: + msg = "Please provide correct fsowner inforamtion. You have "\ + "passed non integer values." 
+ LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _validate_opts(operation, contents, valid_opts, mandatory_opts=None): + LOG.info("Validating options for operation '%s'" % operation) + if 'Opts' in contents and contents['Opts']: + received_opts = contents.get('Opts').keys() + + if mandatory_opts: + diff = set(mandatory_opts) - set(received_opts) + if diff: + # Print options in sorted manner + mandatory_opts.sort() + msg = "One or more mandatory options %s are missing " \ + "for operation %s" % (mandatory_opts, operation) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + diff = set(received_opts) - set(valid_opts) + if diff: + diff = list(diff) + diff.sort() + msg = "Invalid option(s) %s specified for operation %s. " \ + "Please check help for usage." % \ + (diff, operation) + LOG.error(msg) + raise exception.InvalidInput(reason=msg) + + +class FileRequestContextBuilder(RequestContextBuilder): + def __init__(self, backend_configs): + super(FileRequestContextBuilder, self).__init__(backend_configs) + + def _get_build_req_ctxt_map(self): + build_req_ctxt_map = OrderedDict() + # If share-dir is specified, file-store MUST be specified + build_req_ctxt_map['filePersona,help'] = self._create_help_req_ctxt + build_req_ctxt_map['filePersona'] = \ + self._create_share_req_ctxt + # build_req_ctxt_map['persona,cpg'] = \ + # self._create_share_req_ctxt + # build_req_ctxt_map['persona,cpg,size'] = \ + # self._create_share_req_ctxt + # build_req_ctxt_map['persona,cpg,size,fpg_name'] = \ + # self._create_share_req_ctxt + # build_req_ctxt_map['virtualCopyOf,shareName'] = \ + # self._create_snap_req_ctxt + # build_req_ctxt_map['updateShare'] = \ + # self._create_update_req_ctxt + return build_req_ctxt_map + + def _create_share_req_params(self, name, options, def_backend_name): + LOG.info("_create_share_req_params: Entering...") + # import pdb + # pdb.set_trace() + backend = self._get_str_option(options, 'backend', def_backend_name) + 
+        if backend == 'DEFAULT_BLOCK':
+            msg = 'Backend DEFAULT_BLOCK is reserved for Block ' \
+                  'operations. Cannot specify it for File operations'
+            LOG.error(msg)
+            raise exception.InvalidInput(msg)
+
+        config = self._backend_configs.get(backend)
+        if not config:
+            raise exception.InvalidInput(
+                'ERROR: Backend %s is not configured for File Persona'
+                % backend
+            )
+        cpg = self._get_str_option(
+            options, 'cpg',
+            config.hpe3par_cpg[0] if config.hpe3par_cpg else None)
+        if not cpg:
+            raise exception.InvalidInput(
+                "ERROR: CPG is not configured in hpe.conf. Please specify "
+                "name of an existing CPG in hpe.conf and restart plugin")
+
+        fpg = self._get_str_option(options, 'fpg', None)
+        fsMode = self._get_str_option(options, 'fsMode', None)
+        fsOwner = self._get_str_option(options, 'fsOwner', None)
+        if fsMode:
+            self._validate_fsMode(fsMode)
+
+        if fsOwner:
+            self._validate_fsOwner(fsOwner)
+
+        if fsMode:
+            if fsOwner is None:
+                raise exception.InvalidInput(
+                    " ERROR: If mode bits or directory permissions"
+                    " needs to be changed then, providing fsOwner"
+                    " is mandatory")
+
+        size_gib = self._get_int_option(options, 'size', 1024)
+        # Default share size or quota in MiB which is 1TiB
+        size = size_gib * 1024
+
+        fpg_size_gib = int(config.hpe3par_default_fpg_size) * 1024
+
+        if size_gib > fpg_size_gib:
+            raise exception.InvalidInput(
+                "ERROR: Share size cannot be greater than the FPG size. "
+                "Either specify hpe3par_default_fpg_size >= %s GiB or "
+                "specify option '-o size' < %s GiB"
+                % (size_gib, fpg_size_gib))
+
+        # TODO: This check would be required when VFS needs to be created.
+ # NOT HERE + # if not ip_subnet and not config.hpe3par_ip_pool: + # raise exception.InvalidInput( + # "ERROR: Unable to create share as neither 'ipSubnet' " + # "option specified not IP address pool hpe3par_ip_pool " + # "configured in configuration file specified") + + readonly_str = self._get_str_option(options, 'readonly', 'false') + readonly = str.lower(readonly_str) + if readonly == 'true': + readonly = True + elif readonly == 'false': + readonly = False + else: + raise exception.InvalidInput( + 'ERROR: Invalid value "%s" supplied for "readonly" option. ' + 'Valid values are case insensitive ["true", "false"]' + % readonly_str) + + nfs_options = self._get_str_option(options, 'nfsOptions', None) + comment = self._get_str_option(options, 'comment', None) + + share_details = share.create_metadata(backend, cpg, fpg, name, size, + readonly=readonly, + nfs_options=nfs_options, + comment=comment, fsMode=fsMode, + fsOwner=fsOwner) + LOG.info("_create_share_req_params: %s" % share_details) + return share_details + + def _create_share_req_ctxt(self, contents, def_backend_name): + LOG.info("_create_share_req_ctxt: Entering...") + valid_opts = ('backend', 'filePersona', 'cpg', 'fpg', + 'size', 'mountConflictDelay', 'fsMode', 'fsOwner') + mandatory_opts = ('filePersona',) + self._validate_opts("create share", contents, valid_opts, + mandatory_opts) + share_args = self._create_share_req_params(contents['Name'], + contents['Opts'], + def_backend_name) + ctxt = {'orchestrator': 'file', + 'operation': 'create_share', + 'kwargs': share_args} + LOG.info("_create_share_req_ctxt: Exiting: %s" % ctxt) + return ctxt + + def _create_help_req_ctxt(self, contents, def_backend_name): + LOG.info("_create_help_req_ctxt: Entering...") + valid_opts = ('filePersona', 'help', 'mountConflictDelay') + self._validate_opts("create help content for share", contents, + valid_opts, mandatory_opts=None) + options = contents['Opts'] + if options: + value = self._get_str_option(options, 'help', None) 
+ if not value: + return { + 'orchestrator': 'file', + 'operation': 'create_share_help', + 'kwargs': {} + } + + if value == 'backends': + return { + 'orchestrator': 'file', + 'operation': 'get_backends_status', + 'kwargs': {} + } + else: + raise exception.InvalidInput( + "ERROR: Invalid value %s for option 'help' specified." + % value) + LOG.info("_create_help_req_ctxt: Exiting...") + + def _create_snap_req_ctxt(self, contents): + pass + + def _create_update_req_ctxt(self, contents): + pass + + +# TODO: This is work in progress - can be taken up later if agreed upon +# class VolumeRequestContextBuilder(RequestContextBuilder): +# def __init__(self, backend_configs): +# super(VolumeRequestContextBuilder, self).__init__(backend_configs) +# +# def _get_build_req_ctxt_map(self): +# build_req_ctxt_map = OrderedDict() +# build_req_ctxt_map['virtualCopyOf,scheduleName'] = \ +# self._create_snap_schedule_req_ctxt, +# build_req_ctxt_map['virtualCopyOf,scheduleFrequency'] = \ +# self._create_snap_schedule_req_ctxt +# build_req_ctxt_map['virtualCopyOf,snaphotPrefix'] = \ +# self._create_snap_schedule_req_ctxt +# build_req_ctxt_map['virtualCopyOf'] = \ +# self._create_snap_req_ctxt +# build_req_ctxt_map['cloneOf'] = \ +# self._create_clone_req_ctxt +# build_req_ctxt_map['importVol'] = \ +# self._create_import_vol_req_ctxt +# build_req_ctxt_map['replicationGroup'] = \ +# self._create_rcg_req_ctxt +# build_req_ctxt_map['help'] = self._create_help_req_ctxt +# return build_req_ctxt_map +# +# def _default_req_ctxt_creator(self, contents): +# return self._create_vol_create_req_ctxt(contents) +# +# @staticmethod +# def _validate_mutually_exclusive_ops(contents): +# mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol', +# 'replicationGroup'] +# if 'Opts' in contents and contents['Opts']: +# received_opts = contents.get('Opts').keys() +# diff = set(mutually_exclusive_ops) - set(received_opts) +# if len(diff) < len(mutually_exclusive_ops) - 1: +# 
mutually_exclusive_ops.sort() +# msg = "Operations %s are mutually exclusive and cannot be " \ +# "specified together. Please check help for usage." % \ +# mutually_exclusive_ops +# raise exception.InvalidInput(reason=msg) +# +# @staticmethod +# def _validate_opts(operation, contents, valid_opts, mandatory_opts=None): +# if 'Opts' in contents and contents['Opts']: +# received_opts = contents.get('Opts').keys() +# +# if mandatory_opts: +# diff = set(mandatory_opts) - set(received_opts) +# if diff: +# # Print options in sorted manner +# mandatory_opts.sort() +# msg = "One or more mandatory options %s are missing " \ +# "for operation %s" % (mandatory_opts, operation) +# raise exception.InvalidInput(reason=msg) +# +# diff = set(received_opts) - set(valid_opts) +# if diff: +# diff = list(diff) +# diff.sort() +# msg = "Invalid option(s) %s specified for operation %s. " \ +# "Please check help for usage." % \ +# (diff, operation) +# raise exception.InvalidInput(reason=msg) +# +# def _create_vol_create_req_ctxt(self, contents): +# valid_opts = ['compression', 'size', 'provisioning', +# 'flash-cache', 'qos-name', 'fsOwner', +# 'fsMode', 'mountConflictDelay', 'cpg', +# 'snapcpg', 'backend'] +# self._validate_opts("create volume", contents, valid_opts) +# return {'operation': 'create_volume', +# '_vol_orchestrator': 'volume'} +# +# def _create_clone_req_ctxt(self, contents): +# valid_opts = ['cloneOf', 'size', 'cpg', 'snapcpg', +# 'mountConflictDelay'] +# self._validate_opts("clone volume", contents, valid_opts) +# return {'operation': 'clone_volume', +# 'orchestrator': 'volume'} +# +# def _create_snap_req_ctxt(self, contents): +# valid_opts = ['virtualCopyOf', 'retentionHours', 'expirationHours', +# 'mountConflictDelay', 'size'] +# self._validate_opts("create snapshot", contents, valid_opts) +# return {'operation': 'create_snapshot', +# '_vol_orchestrator': 'volume'} +# +# def _create_snap_schedule_req_ctxt(self, contents): +# valid_opts = ['virtualCopyOf', 
'scheduleFrequency', 'scheduleName', +# 'snapshotPrefix', 'expHrs', 'retHrs', +# 'mountConflictDelay', 'size'] +# mandatory_opts = ['scheduleName', 'snapshotPrefix', +# 'scheduleFrequency'] +# self._validate_opts("create snapshot schedule", contents, +# valid_opts, mandatory_opts) +# return {'operation': 'create_snapshot_schedule', +# 'orchestrator': 'volume'} +# +# def _create_import_vol_req_ctxt(self, contents): +# valid_opts = ['importVol', 'backend', 'mountConflictDelay'] +# self._validate_opts("import volume", contents, valid_opts) +# +# # Replication enabled backend cannot be used for volume import +# backend = contents['Opts'].get('backend', 'DEFAULT') +# if backend == '': +# backend = 'DEFAULT' +# +# try: +# config = self._backend_configs[backend] +# except KeyError: +# backend_names = list(self._backend_configs.keys()) +# backend_names.sort() +# msg = "ERROR: Backend '%s' doesn't exist. Available " \ +# "backends are %s. Please use " \ +# "a valid backend name and retry." % \ +# (backend, backend_names) +# raise exception.InvalidInput(reason=msg) +# +# if config.replication_device: +# msg = "ERROR: Import volume not allowed with replication " \ +# "enabled backend '%s'" % backend +# raise exception.InvalidInput(reason=msg) +# +# volname = contents['Name'] +# existing_ref = str(contents['Opts']['importVol']) +# manage_opts = contents['Opts'] +# return {'orchestrator': 'volume', +# 'operation': 'import_volume', +# 'args': (volname, +# existing_ref, +# backend, +# manage_opts)} +# +# def _create_rcg_req_ctxt(self, contents): +# valid_opts = ['replicationGroup', 'size', 'provisioning', +# 'backend', 'mountConflictDelay', 'compression'] +# self._validate_opts('create replicated volume', contents, valid_opts) +# +# # It is possible that the user configured replication in hpe.conf +# # but didn't specify any options. 
In that case too, this operation +# # must fail asking for "replicationGroup" parameter +# # Hence this validation must be done whether "Opts" is there or not +# options = contents['Opts'] +# backend = self._get_str_option(options, 'backend', 'DEFAULT') +# create_vol_args = self._get_create_volume_args(options) +# rcg_name = create_vol_args['replicationGroup'] +# try: +# self._validate_rcg_params(rcg_name, backend) +# except exception.InvalidInput as ex: +# return json.dumps({u"Err": ex.msg}) +# +# return {'operation': 'create_volume', +# 'orchestrator': 'volume', +# 'args': create_vol_args} +# +# def _get_fs_owner(self, options): +# val = self._get_str_option(options, 'fsOwner', None) +# if val: +# fs_owner = val.split(':') +# if len(fs_owner) != 2: +# msg = "Invalid value '%s' specified for fsOwner. Please " \ +# "specify a correct value." % val +# raise exception.InvalidInput(msg) +# return fs_owner +# return None +# +# def _get_fs_mode(self, options): +# fs_mode_str = self._get_str_option(options, 'fsMode', None) +# if fs_mode_str: +# try: +# int(fs_mode_str) +# except ValueError as ex: +# msg = "Invalid value '%s' specified for fsMode. Please " \ +# "specify an integer value." % fs_mode_str +# raise exception.InvalidInput(msg) +# +# if fs_mode_str[0] != '0': +# msg = "Invalid value '%s' specified for fsMode. Please " \ +# "specify an octal value." % fs_mode_str +# raise exception.InvalidInput(msg) +# +# for mode in fs_mode_str: +# if int(mode) > 7: +# msg = "Invalid value '%s' specified for fsMode. Please"\ +# " specify an octal value." 
% fs_mode_str +# raise exception.InvalidInput(msg) +# return fs_mode_str +# +# def _get_create_volume_args(self, options): +# ret_args = dict() +# ret_args['size'] = self._get_int_option( +# options, 'size', volume.DEFAULT_SIZE) +# ret_args['provisioning'] = self._get_str_option( +# options, 'provisioning', volume.DEFAULT_PROV, +# ['full', 'thin', 'dedup']) +# ret_args['flash-cache'] = self._get_str_option( +# options, 'flash-cache', volume.DEFAULT_FLASH_CACHE, +# ['true', 'false']) +# ret_args['qos-name'] = self._get_str_option( +# options, 'qos-name', volume.DEFAULT_QOS) +# ret_args['compression'] = self._get_str_option( +# options, 'compression', volume.DEFAULT_COMPRESSION_VAL, +# ['true', 'false']) +# ret_args['fsOwner'] = self._get_fs_owner(options) +# ret_args['fsMode'] = self._get_fs_mode(options) +# ret_args['mountConflictDelay'] = self._get_int_option( +# options, 'mountConflictDelay', +# volume.DEFAULT_MOUNT_CONFLICT_DELAY) +# ret_args['cpg'] = self._get_str_option(options, 'cpg', None) +# ret_args['snapcpg'] = self._get_str_option(options, 'snapcpg', None) +# ret_args['replicationGroup'] = self._get_str_option( +# options, 'replicationGroup', None) +# +# return ret_args +# +# def _validate_rcg_params(self, rcg_name, backend_name): +# LOG.info("Validating RCG: %s, backend name: %s..." % (rcg_name, +# backend_name)) +# hpepluginconfig = self._backend_configs[backend_name] +# replication_device = hpepluginconfig.replication_device +# +# LOG.info("Replication device: %s" % six.text_type( +# replication_device)) +# +# if rcg_name and not replication_device: +# msg = "Request to create replicated volume cannot be fulfilled"\ +# "without defining 'replication_device' entry defined in"\ +# "hpe.conf for the backend '%s'. Please add it and execute"\ +# "the request again." 
% backend_name +# raise exception.InvalidInput(reason=msg) +# +# if replication_device and not rcg_name: +# backend_names = list(self._backend_configs.keys()) +# backend_names.sort() +# +# msg = "'%s' is a replication enabled backend. " \ +# "Request to create replicated volume cannot be fulfilled "\ +# "without specifying 'replicationGroup' option in the "\ +# "request. Please either specify 'replicationGroup' or use"\ +# "a normal backend and execute the request again. List of"\ +# "backends defined in hpe.conf: %s" % (backend_name, +# backend_names) +# raise exception.InvalidInput(reason=msg) +# +# if rcg_name and replication_device: +# +# def _check_valid_replication_mode(mode): +# valid_modes = ['synchronous', 'asynchronous', 'streaming'] +# if mode.lower() not in valid_modes: +# msg = "Unknown replication mode '%s' specified. Valid "\ +# "values are 'synchronous | asynchronous | " \ +# "streaming'" % mode +# raise exception.InvalidInput(reason=msg) +# +# rep_mode = replication_device['replication_mode'].lower() +# _check_valid_replication_mode(rep_mode) +# if replication_device.get('quorum_witness_ip'): +# if rep_mode.lower() != 'synchronous': +# msg = "For Peer Persistence, replication mode must be "\ +# "synchronous" +# raise exception.InvalidInput(reason=msg) +# +# sync_period = replication_device.get('sync_period') +# if sync_period and rep_mode == 'synchronous': +# msg = "'sync_period' can be defined only for 'asynchronous'"\ +# " and 'streaming' replicate modes" +# raise exception.InvalidInput(reason=msg) +# +# if (rep_mode == 'asynchronous' or rep_mode == 'streaming')\ +# and sync_period: +# try: +# sync_period = int(sync_period) +# except ValueError as ex: +# msg = "Non-integer value '%s' not allowed for " \ +# "'sync_period'. 
%s" % ( +# replication_device.sync_period, ex) +# raise exception.InvalidInput(reason=msg) +# else: +# SYNC_PERIOD_LOW = 300 +# SYNC_PERIOD_HIGH = 31622400 +# if sync_period < SYNC_PERIOD_LOW or \ +# sync_period > SYNC_PERIOD_HIGH: +# msg = "'sync_period' must be between 300 and " \ +# "31622400 seconds." +# raise exception.InvalidInput(reason=msg) +# +# @staticmethod +# def _validate_name(vol_name): +# is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name) +# if not is_valid_name: +# msg = 'Invalid volume name: %s is passed.' % vol_name +# raise exception.InvalidInput(reason=msg) diff --git a/hpedockerplugin/request_router.py b/hpedockerplugin/request_router.py new file mode 100644 index 00000000..4f84086d --- /dev/null +++ b/hpedockerplugin/request_router.py @@ -0,0 +1,129 @@ +from oslo_log import log as logging + +from hpedockerplugin import exception +from hpedockerplugin import request_context as req_ctxt +import hpedockerplugin.synchronization as synchronization + +LOG = logging.getLogger(__name__) + + +class RequestRouter(object): + def __init__(self, **kwargs): + self._orchestrators = {'volume': kwargs.get('vol_orchestrator'), + 'file': kwargs.get('file_orchestrator')} + # TODO: Workaround just to help unit-test framework to work + # To be fixed later + if self._orchestrators['volume']: + self._etcd = self._orchestrators['volume']._etcd_client + elif self._orchestrators['file']: + self._etcd = self._orchestrators['file']._etcd_client + + all_configs = kwargs.get('all_configs') + self._ctxt_builder_factory = \ + req_ctxt.RequestContextBuilderFactory(all_configs) + + def route_create_request(self, name, contents, orchestrator): + LOG.info("route_create_request: Entering...") + req_ctxt_builder = \ + self._ctxt_builder_factory.get_request_context_builder() + if orchestrator: + req_ctxt = req_ctxt_builder.build_request_context( + contents, orchestrator.get_default_backend_name()) + operation = req_ctxt['operation'] + kwargs = req_ctxt['kwargs'] + 
resp = getattr(orchestrator, operation)(**kwargs) + LOG.info("route_create_request: Return value: %s" % resp) + return resp + else: + msg = "'%s' driver is not configured. Please refer to" \ + "the document to learn about configuring the driver." + LOG.error(msg) + raise exception.InvalidInput(msg) + + @synchronization.synchronized_fp_share('{name}') + def route_remove_request(self, name): + orch = self._orchestrators['file'] + if orch: + meta_data = orch.get_meta_data_by_name(name) + if meta_data: + return orch.remove_object(meta_data) + # for persona, orch in self._orchestrators.items(): + # if orch: + # meta_data = orch.get_meta_data_by_name(name) + # if meta_data: + # return orch.remove_object(meta_data) + raise exception.EtcdMetadataNotFound( + "Remove failed: '%s' doesn't exist" % name) + + @synchronization.synchronized_fp_share('{name}') + def route_mount_request(self, name, mount_id): + orch = self._orchestrators['file'] + if orch: + meta_data = orch.get_meta_data_by_name(name) + if meta_data: + return orch.mount_object(meta_data, mount_id) + # for persona, orch in self._orchestrators.items(): + # if orch: + # meta_data = orch.get_meta_data_by_name(name) + # if meta_data: + # return orch.mount_object(meta_data, mount_id) + raise exception.EtcdMetadataNotFound( + "Mount failed: '%s' doesn't exist" % name) + + @synchronization.synchronized_fp_share('{name}') + def route_unmount_request(self, name, mount_id): + orch = self._orchestrators['file'] + if orch: + meta_data = orch.get_meta_data_by_name(name) + if meta_data: + return orch.unmount_object(meta_data, mount_id) + # for persona, orch in self._orchestrators.items(): + # if orch: + # meta_data = orch.get_meta_data_by_name(name) + # if meta_data: + # return orch.unmount_object(meta_data, mount_id) + raise exception.EtcdMetadataNotFound( + "Unmount failed: '%s' doesn't exist" % name) + + # # Since volumes and shares are created under the same ETCD key + # # any orchestrator can return all the volume and share 
names + # def list_objects(self): + # for persona, orch in self._orchestrators.items(): + # if orch: + # return orch.list_objects() + # # TODO: Check if we need to return empty response here? + + def get_object_details(self, name): + orch = self._orchestrators['file'] + if orch: + meta_data = orch.get_meta_data_by_name(name) + if meta_data: + return orch.get_object_details(meta_data) + # for persona, orch in self._orchestrators.items(): + # if orch: + # meta_data = orch.get_meta_data_by_name(name) + # if meta_data: + # return orch.get_object_details(meta_data) + LOG.warning("Share '%s' not found" % name) + raise exception.EtcdMetadataNotFound( + "ERROR: Meta-data details for '%s' don't exist" % name) + + def route_get_path_request(self, name): + orch = self._orchestrators['file'] + if orch: + meta_data = orch.get_meta_data_by_name(name) + if meta_data: + return orch.get_path(meta_data) + # for persona, orch in self._orchestrators.items(): + # if orch: + # meta_data = orch.get_meta_data_by_name(name) + # if meta_data: + # return orch.get_path(name) + raise exception.EtcdMetadataNotFound( + "'%s' doesn't exist" % name) + + def list_objects(self): + orch = self._orchestrators['file'] + if orch: + return orch.list_objects() + return [] diff --git a/hpedockerplugin/request_validator.py b/hpedockerplugin/request_validator.py new file mode 100644 index 00000000..57e66cc9 --- /dev/null +++ b/hpedockerplugin/request_validator.py @@ -0,0 +1,151 @@ +import re +from collections import OrderedDict + +from oslo_log import log as logging + +import hpedockerplugin.exception as exception + +LOG = logging.getLogger(__name__) + + +class RequestValidator(object): + + def __init__(self, backend_configs): + self._backend_configs = backend_configs + + def validate_request(self, contents): + self._validate_name(contents['Name']) + + operations_map = OrderedDict() + operations_map['virtualCopyOf,scheduleName'] = \ + self._validate_snapshot_schedule_opts + 
operations_map['virtualCopyOf,scheduleFrequency'] = \ + self._validate_snapshot_schedule_opts + operations_map['virtualCopyOf,snaphotPrefix'] = \ + self._validate_snapshot_schedule_opts + operations_map['virtualCopyOf'] = \ + self._validate_snapshot_opts + operations_map['cloneOf'] = \ + self._validate_clone_opts + operations_map['importVol'] = \ + self._validate_import_vol_opts + operations_map['replicationGroup'] = \ + self._validate_rcg_opts + operations_map['help'] = self._validate_help_opt + + if 'Opts' in contents and contents['Opts']: + self._validate_mutually_exclusive_ops(contents) + + validated = False + for op_name, validator in operations_map.items(): + op_name = op_name.split(',') + found = not (set(op_name) - set(contents['Opts'].keys())) + if found: + validator(contents) + validated = True + break + + # Validate regular volume options + if not validated: + self._validate_create_volume_opts(contents) + + @staticmethod + def _validate_mutually_exclusive_ops(contents): + mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol', + 'replicationGroup'] + if 'Opts' in contents and contents['Opts']: + received_opts = contents.get('Opts').keys() + diff = set(mutually_exclusive_ops) - set(received_opts) + if len(diff) < len(mutually_exclusive_ops) - 1: + mutually_exclusive_ops.sort() + msg = "Operations %s are mutually exclusive and cannot be " \ + "specified together. Please check help for usage." 
% \ + mutually_exclusive_ops + raise exception.InvalidInput(reason=msg) + + @staticmethod + def _validate_opts(operation, contents, valid_opts, mandatory_opts=None): + if 'Opts' in contents and contents['Opts']: + received_opts = contents.get('Opts').keys() + + if mandatory_opts: + diff = set(mandatory_opts) - set(received_opts) + if diff: + # Print options in sorted manner + mandatory_opts.sort() + msg = "One or more mandatory options %s are missing " \ + "for operation %s" % (mandatory_opts, operation) + raise exception.InvalidInput(reason=msg) + + diff = set(received_opts) - set(valid_opts) + if diff: + diff = list(diff) + diff.sort() + msg = "Invalid option(s) %s specified for operation %s. " \ + "Please check help for usage." % \ + (diff, operation) + raise exception.InvalidInput(reason=msg) + + def _validate_create_volume_opts(self, contents): + valid_opts = ['compression', 'size', 'provisioning', + 'flash-cache', 'qos-name', 'fsOwner', + 'fsMode', 'mountConflictDelay', 'cpg', + 'snapcpg', 'backend', 'manager'] + self._validate_opts("create volume", contents, valid_opts) + + def _validate_clone_opts(self, contents): + valid_opts = ['cloneOf', 'size', 'cpg', 'snapcpg', + 'mountConflictDelay', 'manager'] + self._validate_opts("clone volume", contents, valid_opts) + + def _validate_snapshot_opts(self, contents): + valid_opts = ['virtualCopyOf', 'retentionHours', 'expirationHours', + 'mountConflictDelay', 'size', 'manager'] + self._validate_opts("create snapshot", contents, valid_opts) + + def _validate_snapshot_schedule_opts(self, contents): + valid_opts = ['virtualCopyOf', 'scheduleFrequency', 'scheduleName', + 'snapshotPrefix', 'expHrs', 'retHrs', + 'mountConflictDelay', 'size', 'manager'] + mandatory_opts = ['scheduleName', 'snapshotPrefix', + 'scheduleFrequency'] + self._validate_opts("create snapshot schedule", contents, + valid_opts, mandatory_opts) + + def _validate_import_vol_opts(self, contents): + valid_opts = ['importVol', 'backend', 
'mountConflictDelay', + 'manager'] + self._validate_opts("import volume", contents, valid_opts) + + # Replication enabled backend cannot be used for volume import + if 'Opts' in contents and contents['Opts']: + backend_name = contents['Opts'].get('backend', None) + if not backend_name: + backend_name = 'DEFAULT' + try: + self._backend_configs[backend_name] + except KeyError: + backend_names = list(self._backend_configs.keys()) + backend_names.sort() + msg = "ERROR: Backend '%s' doesn't exist. Available " \ + "backends are %s. Please use " \ + "a valid backend name and retry." % \ + (backend_name, backend_names) + raise exception.InvalidInput(reason=msg) + + def _validate_rcg_opts(self, contents): + valid_opts = ['replicationGroup', 'size', 'provisioning', + 'backend', 'mountConflictDelay', 'compression', + 'manager'] + self._validate_opts('create replicated volume', contents, valid_opts) + + def _validate_help_opt(self, contents): + valid_opts = ['help'] + self._validate_opts('display help', contents, valid_opts) + + @staticmethod + def _validate_name(vol_name): + is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name) + if not is_valid_name: + msg = 'Invalid volume name: %s is passed.' 
% vol_name + raise exception.InvalidInput(reason=msg) diff --git a/hpedockerplugin/synchronization.py b/hpedockerplugin/synchronization.py index d108de74..36575c0d 100644 --- a/hpedockerplugin/synchronization.py +++ b/hpedockerplugin/synchronization.py @@ -14,9 +14,9 @@ def __synchronized(lock_type, lock_name, f, *a, **k): lck_name = lock_name.format(**call_args) lock_acquired = False self = call_args['self'] - lock = self._etcd.get_lock(lock_type) + lock = self._etcd.get_lock(lock_type, lck_name) try: - lock.try_lock_name(lck_name) + lock.try_lock_name() lock_acquired = True LOG.info('Lock acquired: [caller=%s, lock-name=%s]' % (f.__name__, lck_name)) @@ -31,7 +31,7 @@ def __synchronized(lock_type, lock_name, f, *a, **k): finally: if lock_acquired: try: - lock.try_unlock_name(lck_name) + lock.try_unlock_name() LOG.info('Lock released: [caller=%s, lock-name=%s]' % (f.__name__, lck_name)) except exception.HPEPluginUnlockFailed: @@ -55,3 +55,11 @@ def _wrapped(*a, **k): return __synchronized('RCG', lock_name, f, *a, **k) return _wrapped return _synchronized + + +def synchronized_fp_share(lock_name): + def _synchronized(f): + def _wrapped(*a, **k): + return __synchronized('FP_SHARE', lock_name, f, *a, **k) + return _wrapped + return _synchronized diff --git a/hpedockerplugin/volume_manager.py b/hpedockerplugin/volume_manager.py index 533296e6..929b4241 100644 --- a/hpedockerplugin/volume_manager.py +++ b/hpedockerplugin/volume_manager.py @@ -1,15 +1,10 @@ import json -import string import os import six import time -import uuid from sh import chmod -from Crypto.Cipher import AES -import base64 -import hpedockerplugin.etcdutil as util from os_brick.initiator import connector from oslo_config import cfg from oslo_log import log as logging @@ -29,16 +24,23 @@ from hpedockerplugin.i18n import _, _LE, _LI, _LW import hpedockerplugin.synchronization as synchronization + LOG = logging.getLogger(__name__) PRIMARY = 1 +PRIMARY_REV = 1 SECONDARY = 2 CONF = cfg.CONF 
+VolumeOwnedAndMounted = 0 +VolumeOwnedAndNotMounted = 1 +VolumeNotOwned = 2 + class VolumeManager(object): def __init__(self, host_config, hpepluginconfig, etcd_util, - backend_name='DEFAULT'): + node_id, + backend_name): self._host_config = host_config self._hpepluginconfig = hpepluginconfig self._my_ip = netutils.get_my_ipv4() @@ -52,8 +54,10 @@ def __init__(self, host_config, hpepluginconfig, etcd_util, self._etcd = etcd_util self._initialize_configuration() - self._decrypt_password(self.src_bkend_config, - self.tgt_bkend_config, backend_name) + self._pwd_decryptor = utils.PasswordDecryptor(backend_name, + self._etcd) + self._pwd_decryptor.decrypt_password(self.src_bkend_config) + self._pwd_decryptor.decrypt_password(self.tgt_bkend_config) # TODO: When multiple backends come into picture, consider # lazy initialization of individual driver @@ -66,7 +70,7 @@ def __init__(self, host_config, hpepluginconfig, etcd_util, LOG.info("Initialized 3PAR driver!") except Exception as ex: msg = "Failed to initialize 3PAR driver for array: %s!" \ - "Exception: %s"\ + " Exception: %s"\ % (self.src_bkend_config.hpe3par_api_url, six.text_type(ex)) LOG.info(msg) @@ -83,7 +87,7 @@ def __init__(self, host_config, hpepluginconfig, etcd_util, self.src_bkend_config) except Exception as ex: msg = "Failed to initialize 3PAR driver for remote array %s!" 
\ - "Exception: %s"\ + " Exception: %s"\ % (self.tgt_bkend_config.hpe3par_api_url, six.text_type(ex)) LOG.info(msg) @@ -92,7 +96,7 @@ def __init__(self, host_config, hpepluginconfig, etcd_util, self._connector = self._get_connector(hpepluginconfig) # Volume fencing requirement - self._node_id = self._get_node_id() + self._node_id = node_id def _initialize_configuration(self): self.src_bkend_config = self._get_src_bkend_config() @@ -102,8 +106,14 @@ def _initialize_configuration(self): self.tgt_bkend_config = acp.ArrayConnectionParams( self._hpepluginconfig.replication_device) if self.tgt_bkend_config: - self.tgt_bkend_config.hpedockerplugin_driver = \ - self.src_bkend_config.hpedockerplugin_driver + + # Copy all the source configuration to target + hpeconf = self._hpepluginconfig + for key in hpeconf.keys(): + if not self.tgt_bkend_config.is_param_present(key): + value = getattr(hpeconf, key) + self.tgt_bkend_config.__setattr__(key, value) + self.tgt_bkend_config.hpe3par_cpg = self._extract_remote_cpgs( self.tgt_bkend_config.cpg_map) if not self.tgt_bkend_config.hpe3par_cpg: @@ -113,9 +123,10 @@ def _initialize_configuration(self): "Failed to initialize driver - cpg_map not defined for" "replication device") - self.tgt_bkend_config.hpe3par_snapcpg = \ - self._extract_remote_cpgs( - self.tgt_bkend_config.snap_cpg_map) + if self.tgt_bkend_config.snap_cpg_map: + self.tgt_bkend_config.hpe3par_snapcpg = \ + self._extract_remote_cpgs( + self.tgt_bkend_config.snap_cpg_map) if not self.tgt_bkend_config.hpe3par_snapcpg: self.tgt_bkend_config.hpe3par_snapcpg = \ self.tgt_bkend_config.hpe3par_cpg @@ -125,30 +136,14 @@ def _initialize_configuration(self): self.tgt_bkend_config.hpe3par_iscsi_ips = iscsi_ips.split( ';') - # Post failover, user would want to mount the volume to - # target array. In which case, tgt_bkend_config would be - # used to mount the volume. 
Copy the parameters that are - # present with src_bkend_config and are applicable to - # tgt_bkend_config as well - self.tgt_bkend_config.hpe3par_iscsi_chap_enabled = \ - self.src_bkend_config.hpe3par_iscsi_chap_enabled - - # Additional information from target_device - self.src_bkend_config.replication_mode = \ - self.tgt_bkend_config.replication_mode - def _get_src_bkend_config(self): LOG.info("Getting source backend configuration...") hpeconf = self._hpepluginconfig config = acp.ArrayConnectionParams() - config.hpedockerplugin_driver = hpeconf.hpedockerplugin_driver - config.hpe3par_api_url = hpeconf.hpe3par_api_url - config.hpe3par_username = hpeconf.hpe3par_username - config.hpe3par_password = hpeconf.hpe3par_password - config.san_ip = hpeconf.san_ip - config.san_login = hpeconf.san_login - config.san_password = hpeconf.san_password - config.hpe3par_cpg = hpeconf.hpe3par_cpg + for key in hpeconf.keys(): + value = getattr(hpeconf, key) + config.__setattr__(key, value) + if hpeconf.hpe3par_snapcpg: config.hpe3par_snapcpg = hpeconf.hpe3par_snapcpg else: @@ -156,14 +151,7 @@ def _get_src_bkend_config(self): # if 'hpe3par_snapcpg' is NOT given in hpe.conf this should be # default to empty list & populate volume's snap_cpg later with # value given with '-o cpg' - config.hpe3par_snapcpg = [] - - if 'iscsi' in hpeconf.hpedockerplugin_driver: - config.hpe3par_iscsi_ips = hpeconf.hpe3par_iscsi_ips - config.iscsi_ip_address = hpeconf.iscsi_ip_address - config.iscsi_port = hpeconf.iscsi_port - config.hpe3par_iscsi_chap_enabled = \ - hpeconf.hpe3par_iscsi_chap_enabled + config.hpe3par_snapcpg = hpeconf.hpe3par_cpg LOG.info("Got source backend configuration!") return config @@ -189,7 +177,7 @@ def _initialize_driver(self, host_config, src_config, tgt_config): raise exception.HPEPluginNotInitializedException(reason=msg) try: - hpeplugin_driver.do_setup(timeout=5) + hpeplugin_driver.do_setup(timeout=30) hpeplugin_driver.check_for_setup_error() return hpeplugin_driver except 
Exception as ex: @@ -208,19 +196,6 @@ def _get_connector(self, hpepluginconfig): protocol, root_helper, use_multipath=self._use_multipath, device_scan_attempts=5, transport='default') - @staticmethod - def _get_node_id(): - # Save node-id if it doesn't exist - node_id_file_path = '/etc/hpedockerplugin/.node_id' - if not os.path.isfile(node_id_file_path): - node_id = str(uuid.uuid4()) - with open(node_id_file_path, 'w') as node_id_file: - node_id_file.write(node_id) - else: - with open(node_id_file_path, 'r') as node_id_file: - node_id = node_id_file.readline() - return node_id - @synchronization.synchronized_volume('{volname}') def create_volume(self, volname, vol_size, vol_prov, vol_flash, compression_val, vol_qos, @@ -259,9 +234,11 @@ def create_volume(self, volname, vol_size, vol_prov, vol = volume.createvol(volname, vol_size, vol_prov, vol_flash, compression_val, vol_qos, mount_conflict_delay, False, cpg, snap_cpg, - False, current_backend, rcg_name) + False, current_backend) + + bkend_vol_name = "" try: - self._create_volume(vol, undo_steps) + bkend_vol_name = self._create_volume(vol, undo_steps) self._apply_volume_specs(vol, undo_steps) if rcg_name: # bkend_rcg_name = self._get_3par_rcg_name(rcg_name) @@ -278,6 +255,8 @@ def create_volume(self, volname, vol_size, vol_prov, # This will make get_vol_byname more efficient vol['fsOwner'] = fs_owner vol['fsMode'] = fs_mode + vol['3par_vol_name'] = bkend_vol_name + self._etcd.save_vol(vol) except Exception as ex: @@ -310,7 +289,7 @@ def map_3par_volume_time_to_docker(self, vol, expiration=True): endd = datetime.datetime.strptime(enddate, date_format) diff = endd - startt - diff_hour = diff.seconds / 3600 + diff_hour = diff.total_seconds() / 3600 return diff_hour except Exception as ex: @@ -349,7 +328,50 @@ def map_3par_volume_compression_to_docker(self, vol): return True return volume.DEFAULT_COMPRESSION_VAL - def manage_existing(self, volname, existing_ref, backend='DEFAULT'): + def _get_vvset_by_volume_name(self, 
backend_vol_name): + return self._hpeplugin_driver.get_vvset_from_volume( + backend_vol_name) + + def _set_flash_cache_policy(self, vol, vvset_detail): + if vvset_detail is not None: + vvset_name = vvset_detail.get('name') + LOG.info('vvset_name: %(vvset)s' % {'vvset': vvset_name}) + + # check and set the flash-cache if exists + flash_cache_pol = vvset_detail.get('flashCachePolicy') + if flash_cache_pol is not None: + vol['flash_cache'] = (flash_cache_pol == 1) + else: + vol['flash_cache'] = None + + def _set_qos_info(self, vol, vvset_name): + LOG.info("Getting QOS info by vv-set-name '%s' for volume'%s'..." + % (vvset_name, vol['display_name'])) + self._hpeplugin_driver.get_qos_detail(vvset_name) + LOG.info("QOS info found for Docker volume '%s'. Setting QOS name" + "for the volume." % vol['display_name']) + vol["qos_name"] = vvset_name + + def _set_qos_and_flash_cache_info(self, backend_vol_name, vol): + vvset_detail = self._get_vvset_by_volume_name(backend_vol_name) + if vvset_detail: + self._set_flash_cache_policy(vol, vvset_detail) + vvset_name = vvset_detail.get('name') + try: + if vvset_name: + self._set_qos_info(vol, vvset_name) + except Exception as ex: + if not vol['flash_cache']: + msg = (_("ERROR: No QOS or flash-cache found for a volume" + " '%s' present in vvset '%s'" % (backend_vol_name, + vvset_name))) + log_msg = msg + "error: %s" % six.text_type(ex) + LOG.error(log_msg) + # Error message to be displayed in inspect command + vol["qos_name"] = msg + + def manage_existing(self, volname, existing_ref, backend='DEFAULT', + manage_opts=None): LOG.info('Managing a %(vol)s' % {'vol': existing_ref}) # NOTE: Since Docker passes user supplied names and not a unique @@ -370,6 +392,7 @@ def manage_existing(self, volname, existing_ref, backend='DEFAULT'): vol['backend'] = backend vol['fsOwner'] = None vol['fsMode'] = None + vol['Options'] = manage_opts parent_vol = "" try: @@ -383,33 +406,13 @@ def manage_existing(self, volname, existing_ref, 
backend='DEFAULT'): LOG.exception(msg) return json.dumps({u"Err": six.text_type(msg)}) - vvset_detail = self._hpeplugin_driver.get_vvset_from_volume( - existing_ref_details['name']) - if vvset_detail is not None: - vvset_name = vvset_detail.get('name') - LOG.info('vvset_name: %(vvset)s' % {'vvset': vvset_name}) - - # check and set the flash-cache if exists - if(vvset_detail.get('flashCachePolicy') is not None and - vvset_detail.get('flashCachePolicy') == 1): - vol['flash_cache'] = True + if ('rcopyStatus' in existing_ref_details and + existing_ref_details['rcopyStatus'] != 1): + msg = 'ERROR: Volume associated with a replication group '\ + 'cannot be imported' + raise exception.InvalidInput(reason=msg) - try: - self._hpeplugin_driver.get_qos_detail(vvset_name) - LOG.info('Volume:%(existing_ref)s is in vvset_name:' - '%(vvset_name)s associated with QOS' - % {'existing_ref': existing_ref, - 'vvset_name': vvset_name}) - vol["qos_name"] = vvset_name - except Exception as ex: - msg = (_( - 'volume is in vvset:%(vvset_name)s and not associated with' - ' QOS error:%(ex)s') % { - 'vvset_name': vvset_name, - 'ex': six.text_type(ex)}) - LOG.error(msg) - if not vol['flash_cache']: - return json.dumps({u"Err": six.text_type(msg)}) + self._set_qos_and_flash_cache_info(existing_ref_details['name'], vol) # since we have only 'importVol' option for importing, # both volume and snapshot @@ -457,6 +460,9 @@ def manage_existing(self, volname, existing_ref, backend='DEFAULT'): vol['snap_cpg'] = volume_detail_3par.get('snapCPG') if is_snap: + if vol['3par_vol_name'].startswith("dcv-"): + vol['3par_vol_name'] = \ + str.replace(vol['3par_vol_name'], "dcv-", "dcs-", 1) # managing a snapshot if volume_detail_3par.get("expirationTime8601"): expiration_hours = \ @@ -508,8 +514,9 @@ def manage_existing(self, volname, existing_ref, backend='DEFAULT'): @synchronization.synchronized_volume('{src_vol_name}') def clone_volume(self, src_vol_name, clone_name, size=None, cpg=None, snap_cpg=None, - 
current_backend='DEFAULT'): + current_backend='DEFAULT', clone_opts=None): # Check if volume is present in database + LOG.info('hpedockerplugin : clone options 5 %s ' % clone_opts) src_vol = self._etcd.get_vol_byname(src_vol_name) mnt_conf_delay = volume.DEFAULT_MOUNT_CONFLICT_DELAY if src_vol is None: @@ -518,6 +525,22 @@ def clone_volume(self, src_vol_name, clone_name, response = json.dumps({u"Err": msg}) return response + # TODO(sonivi): remove below conversion to 3par volume name, once we + # we have code in place to store 3par volume name in etcd vol object + volume_3par = utils.get_3par_vol_name(src_vol.get('id')) + + # check if volume having any active task, it yes return with error + # add prefix '*' because offline copy task name have pattern like + # e.g. dcv-m0o5ZAwPReaZVoymnLTrMA->dcv-N.9ikeA.RiaxPP4LzecaEQ + # this will check both offline as well as online copy task + if self._hpeplugin_driver.is_vol_having_active_task( + "*%s" % volume_3par): + msg = 'source volume: %s / %s is having some active task ' \ + 'running on array' % (src_vol_name, volume_3par) + LOG.debug(msg) + response = json.dumps({u"Err": msg}) + return response + if not size: size = src_vol['size'] if not cpg: @@ -549,7 +572,7 @@ def clone_volume(self, src_vol_name, clone_name, self._etcd.save_vol(src_vol) return self._clone_volume(clone_name, src_vol, size, cpg, - snap_cpg, current_backend) + snap_cpg, current_backend, clone_opts) def _create_snapshot_record(self, snap_vol, snapshot_name, undo_steps): self._etcd.save_vol(snap_vol) @@ -596,6 +619,22 @@ def _create_snapshot(self, src_vol_name, schedName, snapshot_name, vol['has_schedule'] = vol_sched_flag self._etcd.update_vol(volid, 'has_schedule', vol_sched_flag) + # TODO(sonivi): remove below conversion to 3par volume name, once we + # we have code in place to store 3par volume name in etcd vol object + volume_3par = utils.get_3par_vol_name(volid) + + # check if volume having any active task, it yes return with error + # add prefix 
'*' because offline copy task name have pattern like + # e.g. dcv-m0o5ZAwPReaZVoymnLTrMA->dcv-N.9ikeA.RiaxPP4LzecaEQ + # this will check both offline as well as online copy task + if self._hpeplugin_driver.is_vol_having_active_task( + "*%s" % volume_3par): + msg = 'source volume: %s / %s is having some active task ' \ + 'running on array' % (src_vol_name, volume_3par) + LOG.debug(msg) + response = json.dumps({u"Err": msg}) + return response + # Check if this is an old volume type. If yes, add is_snap flag to it if 'is_snap' not in vol: vol_snap_flag = volume.DEFAULT_TO_SNAP_TYPE @@ -663,6 +702,7 @@ def _create_snapshot(self, src_vol_name, schedName, snapshot_name, 'display_description': 'snapshot of volume %s' % src_vol_name} undo_steps = [] + bkend_snap_name = "" try: bkend_snap_name = self._hpeplugin_driver.create_snapshot( snapshot) @@ -684,8 +724,8 @@ def _create_snapshot(self, src_vol_name, schedName, snapshot_name, 'id': snapshot_id, 'parent_name': src_vol_name, 'parent_id': vol['id'], - 'fsMode': vol['fsMode'], - 'fsOwner': vol['fsOwner'], + 'fsMode': vol.get('fsMode'), + 'fsOwner': vol.get('fsOwner'), 'expiration_hours': expiration_hrs, 'retention_hours': retention_hrs} if has_schedule: @@ -700,9 +740,12 @@ def _create_snapshot(self, src_vol_name, schedName, snapshot_name, vol['snapshots'].append(db_snapshot) snap_vol['snap_metadata'] = db_snapshot snap_vol['backend'] = current_backend + snap_vol['3par_vol_name'] = bkend_snap_name try: - self._create_snapshot_record(snap_vol, snapshot_name, undo_steps) + self._create_snapshot_record(snap_vol, + snapshot_name, + undo_steps) # For now just track volume to uuid mapping internally # TODO: Save volume name and uuid mapping in etcd as well @@ -730,9 +773,9 @@ def remove_volume(self, volname): vol = self._etcd.get_vol_byname(volname) if vol is None: # Just log an error, but don't fail the docker rm command - msg = (_LE('Volume remove name not found %s'), volname) + msg = 'Volume name to remove not found: %s' % 
volname LOG.error(msg) - return json.dumps({u"Err": ''}) + return json.dumps({u"Err": msg}) parent_name = None is_snap = False if 'is_snap' in vol and vol['is_snap']: @@ -826,7 +869,7 @@ def remove_snapshot(self, volname, snapname): @synchronization.synchronized_volume('{clone_name}') def _clone_volume(self, clone_name, src_vol, size, cpg, - snap_cpg, current_backend): + snap_cpg, current_backend, clone_opts): # Create clone volume specification undo_steps = [] @@ -839,13 +882,19 @@ def _clone_volume(self, clone_name, src_vol, size, cpg, False, cpg, snap_cpg, False, current_backend) try: - self.__clone_volume__(src_vol, clone_vol, undo_steps) + bkend_clone_name = self.__clone_volume__(src_vol, + clone_vol, + undo_steps) self._apply_volume_specs(clone_vol, undo_steps) # For now just track volume to uuid mapping internally # TODO: Save volume name and uuid mapping in etcd as well # This will make get_vol_byname more efficient clone_vol['fsOwner'] = src_vol.get('fsOwner') clone_vol['fsMode'] = src_vol.get('fsMode') + clone_vol['3par_vol_name'] = bkend_clone_name + if clone_opts is not None: + clone_vol['Options'] = clone_opts + self._etcd.save_vol(clone_vol) except Exception as ex: @@ -857,35 +906,36 @@ def _clone_volume(self, clone_name, src_vol, size, cpg, else: return json.dumps({u"Err": ''}) - @synchronization.synchronized_volume('{volumename}') - def revert_to_snapshot(self, volumename, snapname): - volume = self._etcd.get_vol_byname(volumename) - if volume is None: - msg = (_LE('Volume: %s does not exist' % volumename)) - LOG.info(msg) - response = json.dumps({u"Err": msg}) - return response - - snapshots = volume['snapshots'] - LOG.info("Getting snapshot by name: %s" % snapname) - snapshot, idx = self._get_snapshot_by_name(snapshots, - snapname) - if snapshot: - try: - LOG.info("Found snapshot by name %s" % snapname) - self._hpeplugin_driver.revert_snap_to_vol(volume, snapshot) - response = json.dumps({u"Err": ''}) - return response - except Exception as ex: - 
msg = (_('revert snapshot failed, error is: %s'), - six.text_type(ex)) - LOG.error(msg) - return json.dumps({u"Err": six.text_type(ex)}) - else: - msg = (_LE('snapshot: %s does not exist!' % snapname)) - LOG.info(msg) - response = json.dumps({u"Err": msg}) - return response + # Commenting out unused function to increase coverage + # @synchronization.synchronized_volume('{volumename}') + # def revert_to_snapshot(self, volumename, snapname): + # volume = self._etcd.get_vol_byname(volumename) + # if volume is None: + # msg = (_LE('Volume: %s does not exist' % volumename)) + # LOG.info(msg) + # response = json.dumps({u"Err": msg}) + # return response + # + # snapshots = volume['snapshots'] + # LOG.info("Getting snapshot by name: %s" % snapname) + # snapshot, idx = self._get_snapshot_by_name(snapshots, + # snapname) + # if snapshot: + # try: + # LOG.info("Found snapshot by name %s" % snapname) + # self._hpeplugin_driver.revert_snap_to_vol(volume, snapshot) + # response = json.dumps({u"Err": ''}) + # return response + # except Exception as ex: + # msg = (_('revert snapshot failed, error is: %s'), + # six.text_type(ex)) + # LOG.error(msg) + # return json.dumps({u"Err": six.text_type(ex)}) + # else: + # msg = (_LE('snapshot: %s does not exist!' 
% snapname)) + # LOG.info(msg) + # response = json.dumps({u"Err": msg}) + # return response def _get_snapshot_response(self, snapinfo, snapname): err = '' @@ -909,6 +959,7 @@ def _get_snapshot_response(self, snapinfo, snapname): retention_hours = metadata['retention_hours'] snap_detail = {} + snap_detail['id'] = snapinfo.get('id') snap_detail['size'] = snapinfo.get('size') snap_detail['compression'] = snapinfo.get('compression') snap_detail['provisioning'] = snapinfo.get('provisioning') @@ -922,9 +973,19 @@ def _get_snapshot_response(self, snapinfo, snapname): snap_detail['mountConflictDelay'] = snapinfo.get( 'mount_conflict_delay') snap_detail['snap_cpg'] = snapinfo.get('snap_cpg') + snap_detail['backend'] = snapinfo.get('backend') + if 'snap_schedule' in metadata: snap_detail['snap_schedule'] = metadata['snap_schedule'] + LOG.info('_get_snapshot_response: adding 3par vol info') + + if '3par_vol_name' in snapinfo: + snap_detail['3par_vol_name'] = snapinfo.get('3par_vol_name') + else: + snap_detail['3par_vol_name'] = utils.get_3par_name(snapinfo['id'], + True) + snapshot['Status'].update({'snap_detail': snap_detail}) response = json.dumps({u"Err": err, u"Volume": snapshot}) @@ -1037,6 +1098,9 @@ def get_volume_snap_details(self, volname, snapname, qualified_name): ss_list_to_show.append(snapshot) volume['Status'].update({'Snapshots': ss_list_to_show}) + backend_vol_name = utils.get_3par_vol_name(volinfo['id']) + self._set_qos_and_flash_cache_info(backend_vol_name, volinfo) + qos_name = volinfo.get('qos_name') if qos_name is not None: try: @@ -1045,32 +1109,62 @@ def get_volume_snap_details(self, volname, snapname, qualified_name): qos_filter = self._get_required_qos_field(qos_detail) volume['Status'].update({'qos_detail': qos_filter}) except Exception as ex: - msg = (_('unable to get/filter qos from 3par, error is:' - ' %s'), six.text_type(ex)) + msg = "ERROR: Failed to retrieve QoS '%s' from 3PAR" \ + % qos_name + volume['Status'].update({'qos_detail': msg}) + msg 
+= ' %s' % six.text_type(ex) LOG.error(msg) - return json.dumps({u"Err": six.text_type(ex)}) + + flash_cache = volinfo.get('flash_cache') + if flash_cache is not None: + flash_cache = 'true' if flash_cache else 'false' vol_detail = {} + vol_detail['id'] = volinfo.get('id') vol_detail['size'] = volinfo.get('size') - vol_detail['flash_cache'] = volinfo.get('flash_cache') + vol_detail['flash_cache'] = flash_cache vol_detail['compression'] = volinfo.get('compression') vol_detail['provisioning'] = volinfo.get('provisioning') vol_detail['fsOwner'] = volinfo.get('fsOwner') vol_detail['fsMode'] = volinfo.get('fsMode') vol_detail['mountConflictDelay'] = volinfo.get( 'mount_conflict_delay') + vol_detail['cpg'] = volinfo.get('cpg') + vol_detail['snap_cpg'] = volinfo.get('snap_cpg') + vol_detail['backend'] = volinfo.get('backend') + vol_detail['domain'] = self._hpeplugin_driver.get_domain( + vol_detail['cpg']) + + LOG.info(' get_volume_snap_details : adding 3par vol info') + if '3par_vol_name' in volinfo: + vol_detail['3par_vol_name'] = volinfo['3par_vol_name'] + else: + vol_detail['3par_vol_name'] = \ + utils.get_3par_name(volinfo['id'], + False) + + if 'Options' in volinfo: + vol_detail['Options'] = volinfo['Options'] + + if volinfo.get('rcg_info'): + vol_detail['secondary_cpg'] = \ + self.tgt_bkend_config.hpe3par_cpg[0] + vol_detail['secondary_snap_cpg'] = \ + self.tgt_bkend_config.hpe3par_snapcpg[0] + + # fetch rcg details and display + rcg_name = volinfo['rcg_info']['local_rcg_name'] + try: + rcg_detail = self._hpeplugin_driver.get_rcg(rcg_name) + rcg_filter = self._get_required_rcg_field(rcg_detail) + volume['Status'].update({'rcg_detail': rcg_filter}) + except Exception as ex: + msg = "ERROR: Failed to retrieve RCG '%s' from 3PAR" \ + % rcg_name + volume['Status'].update({'rcg_detail': msg}) + msg += ' %s' % six.text_type(ex) + LOG.error(msg) - cpg = volinfo.get('cpg') - snap_cpg = volinfo.get('snap_cpg') - rcg_info = volinfo.get('rcg_info') - if rcg_info: - driver = 
self._get_target_driver(rcg_info) - if driver == self._remote_driver: - cpg = self.tgt_bkend_config['hpe3par_cpg'] - snap_cpg = self.tgt_bkend_config['hpe3par_snapcpg'] - - vol_detail['cpg'] = cpg - vol_detail['snap_cpg'] = snap_cpg volume['Status'].update({'volume_detail': vol_detail}) response = json.dumps({u"Err": err, u"Volume": volume}) @@ -1080,30 +1174,23 @@ def get_volume_snap_details(self, volname, snapname, qualified_name): def list_volumes(self): volumes = self._etcd.get_all_vols() - if volumes is None: - response = json.dumps({u"Err": ''}) - return response - volumelist = [] - for volinfo in volumes.children: - if volinfo.key != util.VOLUMEROOT: - path_info = self._etcd.get_path_info_from_vol(volinfo.value) - if path_info is not None and 'mount_dir' in path_info: - mountdir = path_info['mount_dir'] - devicename = path_info['path'] - else: - mountdir = '' - devicename = '' - info = json.loads(volinfo.value) - volume = {'Name': info['display_name'], - 'Devicename': devicename, - 'size': info['size'], - 'Mountpoint': mountdir, - 'Status': {}} - volumelist.append(volume) - - response = json.dumps({u"Err": '', u"Volumes": volumelist}) - return response + for volinfo in volumes: + path_info = self._etcd.get_path_info_from_vol(volinfo) + if path_info is not None and 'mount_dir' in path_info: + mountdir = path_info['mount_dir'] + devicename = path_info['path'] + else: + mountdir = '' + devicename = '' + volume = {'Name': volinfo['display_name'], + 'Devicename': devicename, + 'size': volinfo['size'], + 'Mountpoint': mountdir, + 'Status': {}} + volumelist.append(volume) + + return volumelist def get_path(self, volname): volinfo = self._etcd.get_vol_byname(volname) @@ -1131,8 +1218,25 @@ def _is_vol_not_mounted(vol): def _is_first_mount(node_mount_info): return (len(node_mount_info) == 0) - def _is_vol_mounted_on_this_node(self, node_mount_info): - return self._node_id in node_mount_info + def _is_vol_mounted_on_this_node(self, node_mount_info, vol): + if 
self._node_id in node_mount_info: + # get the information from etcd where the volume should be mounted + path_info = self._etcd.get_path_info_from_vol(vol) + # important is here the device which should be mounted... + path_name = path_info['path'] + # ... and the target it should be mounted to! + mount_dir = path_info['mount_dir'] + + # now check if this mount is really present on the node + if fileutil.check_if_mounted(path_name, mount_dir): + # Multiple containers mounting the same volume on same node + return VolumeOwnedAndMounted + else: + # This is a case of node reboot or deleted Stateful-set POD + return VolumeOwnedAndNotMounted + else: + # Failover case where volume is evicted from other node to this one + return VolumeNotOwned def _update_mount_id_list(self, vol, mount_id): node_mount_info = vol['node_mount_info'] @@ -1187,7 +1291,10 @@ def _wait_for_graceful_vol_unmount(self, vol): def _force_remove_vlun(self, vol, is_snap): bkend_vol_name = utils.get_3par_name(vol['id'], is_snap) - if self.tgt_bkend_config: + # Check if replication is configured and volume is + # populated with the RCG + if (self.tgt_bkend_config and 'rcg_info' in vol and + vol['rcg_info'] is not None): if self.tgt_bkend_config.quorum_witness_ip: LOG.info("Peer Persistence setup: Removing VLUNs " "forcefully from remote backend...") @@ -1197,27 +1304,34 @@ def _force_remove_vlun(self, vol, is_snap): "removed from remote backend!") else: LOG.info("Active/Passive setup: Getting active driver...") - driver = self._get_target_driver(vol['rcg_info']) - LOG.info("Active/Passive setup: Got active driver!") - LOG.info("Active/Passive setup: Removing VLUNs " - "forcefully from remote backend...") - driver.force_remove_volume_vlun(bkend_vol_name) - LOG.info("Active/Passive setup: VLUNs forcefully " - "removed from remote backend!") + try: + driver = self._get_target_driver(vol['rcg_info']) + if driver: + LOG.info("Active/Passive setup: Got active driver!") + LOG.info("Active/Passive setup: 
Removing VLUNs " + "forcefully from remote backend...") + driver.force_remove_volume_vlun(bkend_vol_name) + LOG.info("Active/Passive setup: VLUNs forcefully " + "removed from remote backend!") + else: + msg = "Failed to force remove VLUN(s) " \ + "Could not determine the target array based on" \ + "state of RCG %s." % \ + vol['rcg_info']['local_rcg_name'] + LOG.error(msg) + raise exception.HPEDriverForceRemoveVLUNFailed( + reason=msg) + except Exception as ex: + msg = "Failed to force remove VLUN(s). " \ + "Exception: %s" % six.text_type(ex) + LOG.error(msg) + raise exception.HPEDriverForceRemoveVLUNFailed( + reason=six.text_type(ex)) else: LOG.info("Removing VLUNs forcefully from remote backend...") self._primary_driver.force_remove_volume_vlun(bkend_vol_name) LOG.info("VLUNs forcefully removed from remote backend!") - def _replace_node_mount_info(self, node_mount_info, mount_id): - # Remove previous node info from volume meta-data - old_node_id = list(node_mount_info.keys())[0] - node_mount_info.pop(old_node_id) - - # Add new node information to volume meta-data - node_mount_info[self._node_id] = [mount_id] - - @synchronization.synchronized_volume('{volname}') def mount_volume(self, volname, vol_mount, mount_id): vol = self._etcd.get_vol_byname(volname) if vol is None: @@ -1225,52 +1339,22 @@ def mount_volume(self, volname, vol_mount, mount_id): LOG.error(msg) raise exception.HPEPluginMountException(reason=msg) - volid = vol['id'] - is_snap = False - if 'is_snap' not in vol: - vol['is_snap'] = volume.DEFAULT_TO_SNAP_TYPE - self._etcd.update_vol(volid, 'is_snap', is_snap) - elif vol['is_snap']: - is_snap = vol['is_snap'] - vol['fsOwner'] = vol['snap_metadata'].get('fsOwner') - vol['fsMode'] = vol['snap_metadata'].get('fsMode') - # Initialize node-mount-info if volume is being mounted - # for the first time - if self._is_vol_not_mounted(vol): - LOG.info("Initializing node_mount_info... 
adding first " - "mount ID %s" % mount_id) - node_mount_info = {self._node_id: [mount_id]} - vol['node_mount_info'] = node_mount_info - - if 'mount_conflict_delay' not in vol: - m_conf_delay = volume.DEFAULT_MOUNT_CONFLICT_DELAY - vol['mount_conflict_delay'] = m_conf_delay - self._etcd.update_vol(volid, 'mount_conflict_delay', - m_conf_delay) - else: - # Volume is in mounted state - Volume fencing logic begins here - node_mount_info = vol['node_mount_info'] - - # If mounted on this node itself then just append mount-id - if self._is_vol_mounted_on_this_node(node_mount_info): - self._update_mount_id_list(vol, mount_id) - return self._get_success_response(vol) - else: + node_mount_info = vol.get('node_mount_info') + if node_mount_info: + is_vol_owned = self._is_vol_mounted_on_this_node( + node_mount_info, vol + ) + if is_vol_owned == VolumeNotOwned: # Volume mounted on different node LOG.info("Volume mounted on a different node. Waiting for " "other node to gracefully unmount the volume...") + self._wait_for_graceful_vol_unmount(vol) - unmounted = self._wait_for_graceful_vol_unmount(vol) - - if not unmounted: - LOG.info("Volume not gracefully unmounted by other node") - LOG.info("%s" % vol) - self._force_remove_vlun(vol, is_snap) - - LOG.info("Updating node_mount_info...") - self._replace_node_mount_info(node_mount_info, mount_id) - LOG.info("node_mount_info updated!") + # Grab lock on volume name and continue with mount + return self._synchronized_mount_volume(volname, vol_mount, mount_id) + @synchronization.synchronized_volume('{volname}') + def _synchronized_mount_volume(self, volname, vol_mount, mount_id): root_helper = 'sudo' connector_info = connector.get_connector_properties( root_helper, self._my_ip, multipath=self._use_multipath, @@ -1288,11 +1372,18 @@ def _mount_volume(driver): LOG.debug('connection_info: %(connection_info)s, ' 'was successfully retrieved', {'connection_info': json.dumps(connection_info)}) + + undo_steps.append( + {'undo_func': 
driver.terminate_connection, + 'params': (vol, connector_info, is_snap), + 'msg': 'Terminating connection to volume: %s...' + % volname}) except Exception as ex: msg = (_('Initialize Connection Failed: ' 'connection info retrieval failed, error is: '), six.text_type(ex)) LOG.error(msg) + self._rollback(undo_steps) raise exception.HPEPluginMountException(reason=msg) # Call OS Brick to connect volume @@ -1300,28 +1391,175 @@ def _mount_volume(driver): LOG.debug("OS Brick Connector Connecting Volume...") device_info = self._connector.connect_volume( connection_info['data']) + + undo_steps.append( + {'undo_func': self._connector.disconnect_volume, + 'params': (connection_info['data'], None), + 'msg': 'Undoing connection to volume: %s...' % volname}) except Exception as ex: msg = (_('OS Brick connect volume failed, error is: '), six.text_type(ex)) LOG.error(msg) + self._rollback(undo_steps) raise exception.HPEPluginMountException(reason=msg) return device_info, connection_info + # Check for volume's existence once again after lock has been + # acquired. 
This is just to ensure another thread didn't delete + # the volume before reaching this point in mount-volume flow + vol = self._etcd.get_vol_byname(volname) + if vol is None: + msg = (_LE('Volume mount name not found %s'), volname) + LOG.error(msg) + raise exception.HPEPluginMountException(reason=msg) + + undo_steps = [] + volid = vol['id'] + + # Update volume metadata with the fields that may not be + # there due to the fact that this volume might have been + # created using an older version of plugin + is_snap = False + if 'is_snap' not in vol: + vol['is_snap'] = volume.DEFAULT_TO_SNAP_TYPE + self._etcd.update_vol(volid, 'is_snap', is_snap) + elif vol['is_snap']: + is_snap = vol['is_snap'] + vol['fsOwner'] = vol['snap_metadata'].get('fsOwner') + vol['fsMode'] = vol['snap_metadata'].get('fsMode') + + if 'mount_conflict_delay' not in vol: + m_conf_delay = volume.DEFAULT_MOUNT_CONFLICT_DELAY + vol['mount_conflict_delay'] = m_conf_delay + self._etcd.update_vol(volid, 'mount_conflict_delay', + m_conf_delay) + # Initialize node-mount-info if volume is being mounted + # for the first time + if self._is_vol_not_mounted(vol): + LOG.info("Initializing node_mount_info... 
adding first " + "mount ID %s" % mount_id) + node_mount_info = {self._node_id: [mount_id]} + vol['node_mount_info'] = node_mount_info + else: + # Volume is in mounted state - Volume fencing logic begins here + node_mount_info = vol['node_mount_info'] + + flag = self._is_vol_mounted_on_this_node(node_mount_info, vol) + # If mounted on this node itself then just append mount-id + if flag == VolumeOwnedAndMounted: + self._update_mount_id_list(vol, mount_id) + return self._get_success_response(vol) + elif flag == VolumeNotOwned: + # Volume mounted on different node + LOG.info("Volume not gracefully unmounted by other node") + LOG.info("%s" % vol) + self._force_remove_vlun(vol, is_snap) + + # Since VLUNs exported to previous node were forcefully + # removed, cache the connection information so that it + # can be used later when user tries to un-mount volume + # from the previous node + if 'path_info' in vol: + path_info = vol['path_info'] + old_node_id = list(node_mount_info.keys())[0] + old_path_info = vol.get('old_path_info', []) + + # Check if old_node_id is already present in old_path_info + # If found, replace it by removing the existing ones and + # appending the new one + if old_path_info: + LOG.info("Old path info found! Removing any " + "duplicate entries...") + # This is a temporary logic without a break statement + # This is required to remove multiple duplicate tuples + # (node_id, path_info) i.e. entries with same node_id + # Later on + updated_list = [] + for opi in old_path_info: + node_id = opi[0] + if old_node_id == node_id: + LOG.info("Found old-path-info tuple " + "having node-id %s for volume %s. " + "Skipping it..." 
+ % (node_id, volname)) + continue + updated_list.append(opi) + old_path_info = updated_list + + old_path_info.append((old_node_id, path_info)) + self._etcd.update_vol(volid, 'old_path_info', + old_path_info) + + node_mount_info = {self._node_id: [mount_id]} + LOG.info("New node_mount_info set: %s" % node_mount_info) + elif flag == VolumeOwnedAndNotMounted: + LOG.info("This might be the case of reboot...") + LOG.info("Volume %s is owned by this node %s but it is not " + "in mounted state" % (volname, self._node_id)) + # We need to simply mount the volume using the information + # in ETCD + path_info = self._etcd.get_path_info_from_vol(vol) + if path_info: + dev_sym_link = path_info['device_info']['path'] + etcd_dev_path = path_info['path'] + real_dev_path = os.path.realpath(dev_sym_link) + if etcd_dev_path != real_dev_path: + LOG.info("Multipath device remapped for %s. " + "[Old-dev: %s, New-dev: %s]. " + "Using new device for mounting!" % + (dev_sym_link, etcd_dev_path, real_dev_path)) + # Assigning blindly real_dev_path + path_info['path'] = real_dev_path + mount_dir = path_info['mount_dir'] + # Ensure: + # 1. we have a multi-path device + # 2. mount dir is present + # 3. device symlink is not broken + if 'dm-' in real_dev_path and \ + fileutil.check_if_file_exists(mount_dir): + if fileutil.check_if_file_exists(real_dev_path): + LOG.info("Case of reboot confirmed! 
Mounting " + "device %s on path %s" + % (dev_sym_link, mount_dir)) + try: + fileutil.mount_dir(dev_sym_link, mount_dir) + self._etcd.update_vol(vol['id'], + 'path_info', + json.dumps(path_info)) + except Exception as ex: + msg = "Mount volume failed: %s" % \ + six.text_type(ex) + LOG.error(msg) + self._rollback(undo_steps) + response = json.dumps({"Err": '%s' % msg}) + return response + else: + mount_ids = node_mount_info[self._node_id] + if mount_id not in mount_ids: + # In case of reboot, mount-id list will + # have a previous stale mount-id which + # if not cleaned will disallow actual + # unmount of the volume forever. Hence + # creating new mount-id list with just + # the new mount_id received + node_mount_info[self._node_id] = \ + [mount_id] + self._etcd.update_vol(vol['id'], + 'node_mount_info', + node_mount_info) + return self._get_success_response(vol) + else: + LOG.info("Symlink %s exists but corresponding " + "device %s does not" % + (dev_sym_link, real_dev_path)) + pri_connection_info = None sec_connection_info = None - # Check if replication is configured - if self.tgt_bkend_config: + # Check if replication is configured and volume is + # populated with the RCG + if (self.tgt_bkend_config and 'rcg_info' in vol and + vol['rcg_info'] is not None): LOG.info("This is a replication setup") - # TODO: This is where existing volume can be added to RCG - # after enabling replication configuration in hpe.conf - if 'rcg_info' not in vol or not vol['rcg_info']: - msg = "Volume %s is not a replicated volume. 
It seems" \ - "the backend configuration was modified to be a" \ - "replication configuration after volume creation."\ - % volname - LOG.error(msg) - raise exception.HPEPluginMountException(reason=msg) - # Check if this is Active/Passive based replication if self.tgt_bkend_config.quorum_witness_ip: LOG.info("Peer Persistence has been configured") @@ -1359,6 +1597,7 @@ def _mount_volume(driver): if path.exists is False: msg = (_('path: %s, does not exist'), path) LOG.error(msg) + self._rollback(undo_steps) raise exception.HPEPluginMountException(reason=msg) LOG.debug('path for volume: %(name)s, was successfully created: ' @@ -1375,16 +1614,38 @@ def _mount_volume(driver): # Determine if we need to mount the volume if vol_mount == volume.DEFAULT_MOUNT_VOLUME: # mkdir for mounting the filesystem - mount_dir = fileutil.mkdir_for_mounting(device_info['path']) + if self._host_config.mount_prefix: + mount_prefix = self._host_config.mount_prefix + else: + mount_prefix = None + mount_dir = fileutil.mkdir_for_mounting(device_info['path'], + mount_prefix) LOG.debug('Directory: %(mount_dir)s, ' 'successfully created to mount: ' '%(mount)s', {'mount_dir': mount_dir, 'mount': device_info['path']}) + undo_steps.append( + {'undo_func': fileutil.remove_dir, + 'params': mount_dir, + 'msg': 'Removing mount directory: %s...' % mount_dir}) + # mount the directory - fileutil.mount_dir(path.path, mount_dir) - LOG.debug('Device: %(path)s successfully mounted on %(mount)s', - {'path': path.path, 'mount': mount_dir}) + try: + fileutil.mount_dir(path.path, mount_dir) + LOG.debug('Device: %(path)s successfully mounted on %(mount)s', + {'path': path.path, 'mount': mount_dir}) + + undo_steps.append( + {'undo_func': fileutil.umount_dir, + 'params': mount_dir, + 'msg': 'Unmounting directory: %s...' 
% mount_dir}) + except Exception as ex: + msg = "Mount volume failed: %s" % six.text_type(ex) + LOG.error(msg) + self._rollback(undo_steps) + response = json.dumps({"Err": '%s' % msg}) + return response # TODO: find out how to invoke mkfs so that it creates the # filesystem without the lost+found directory @@ -1398,90 +1659,112 @@ def _mount_volume(driver): else: mount_dir = '' - if 'fsOwner' in vol and vol['fsOwner']: - fs_owner = vol['fsOwner'].split(":") - uid = int(fs_owner[0]) - gid = int(fs_owner[1]) - os.chown(mount_dir, uid, gid) - - if 'fsMode' in vol and vol['fsMode']: - mode = str(vol['fsMode']) - chmod(mode, mount_dir) - - path_info = {} - path_info['name'] = volname - path_info['path'] = path.path - path_info['device_info'] = device_info - path_info['connection_info'] = pri_connection_info - path_info['mount_dir'] = mount_dir - if sec_connection_info: - path_info['remote_connection_info'] = sec_connection_info - - LOG.info("Updating node_mount_info in etcd with mount_id %s..." - % mount_id) - self._etcd.update_vol(volid, - 'node_mount_info', - node_mount_info) - LOG.info("node_mount_info updated successfully in etcd with mount_id " - "%s" % mount_id) - self._etcd.update_vol(volid, 'path_info', json.dumps(path_info)) + try: + if 'fsOwner' in vol and vol['fsOwner']: + fs_owner = vol['fsOwner'].split(":") + uid = int(fs_owner[0]) + gid = int(fs_owner[1]) + os.chown(mount_dir, uid, gid) + + if 'fsMode' in vol and vol['fsMode']: + mode = str(vol['fsMode']) + chmod(mode, mount_dir) + + path_info = {} + path_info['name'] = volname + path_info['path'] = path.path + path_info['device_info'] = device_info + path_info['connection_info'] = pri_connection_info + path_info['mount_dir'] = mount_dir + if sec_connection_info: + path_info['remote_connection_info'] = sec_connection_info + + LOG.info("Updating node_mount_info in etcd with mount_id %s..." 
+ % mount_id) + self._etcd.update_vol(volid, + 'node_mount_info', + node_mount_info) + LOG.info("node_mount_info updated successfully in etcd with " + "mount_id %s" % mount_id) + self._etcd.update_vol(volid, 'path_info', json.dumps(path_info)) + + response = json.dumps({u"Err": '', u"Name": volname, + u"Mountpoint": mount_dir, + u"Devicename": path.path}) + except Exception as ex: + self._rollback(undo_steps) + response = json.dumps({"Err": '%s' % six.text_type(ex)}) - response = json.dumps({u"Err": '', u"Name": volname, - u"Mountpoint": mount_dir, - u"Devicename": path.path}) return response def _get_target_driver(self, rcg_info): local_rcg = None + rcg_name = rcg_info.get('local_rcg_name') try: - rcg_name = rcg_info['local_rcg_name'] + LOG.info("Getting local RCG: %s" % rcg_name) local_rcg = self._primary_driver.get_rcg(rcg_name) local_role_reversed = local_rcg['targets'][0]['roleReversed'] except Exception as ex: - msg = (_("There was an error fetching the remote copy " - "group from primary array: %s.") % six.text_type(ex)) + msg = "There was an error fetching the remote copy " \ + "group %s from primary array: %s" % \ + (rcg_name, six.text_type(ex)) LOG.error(msg) remote_rcg = None + remote_rcg_name = rcg_info.get('remote_rcg_name') try: - remote_rcg_name = rcg_info['remote_rcg_name'] + LOG.info("Getting remote RCG: %s" % remote_rcg_name) remote_rcg = self._remote_driver.get_rcg(remote_rcg_name) remote_role_reversed = remote_rcg['targets'][0]['roleReversed'] except Exception as ex: - msg = (_("There was an error fetching the remote copy " - "group from secondary array: %s.") % six.text_type(ex)) + msg = "There was an error fetching the remote copy " \ + "group %s from secondary array: %s" % \ + (remote_rcg_name, six.text_type(ex)) LOG.error(msg) - if not (local_rcg and remote_rcg): - msg = (_("Failed to get remote copy group role: %s") % rcg_name) - LOG.error(msg) - raise exception.HPEPluginMountException(reason=msg) - # Both arrays are up - this could just be 
a group fail-over if local_rcg and remote_rcg: + LOG.info("Got both local and remote RCGs! Checking roles...") # State before to fail-over if local_rcg['role'] == PRIMARY and not local_role_reversed and \ remote_rcg['role'] == SECONDARY and not remote_role_reversed: + LOG.info("Primary array is the active array") return self._primary_driver + # Primary array is either down or RCG under maintenance + # Allow remote target driver to take over + if local_rcg['role'] == PRIMARY and not local_role_reversed and \ + remote_rcg['role'] == PRIMARY_REV and remote_role_reversed: + msg = "Secondary array is the active array" + LOG.info(msg) + return self._remote_driver + # State post recover if remote_rcg['role'] == PRIMARY and remote_role_reversed and \ local_rcg['role'] == SECONDARY and local_role_reversed: + LOG.info("Secondary array is the active array") return self._remote_driver - msg = (_("Cannot perform mount at this time as remote copy group " - " %s is being failed over or failed back. Please try " - "after some time.") % rcg_name) - raise exception.HPEPluginMountException(reason=msg) + msg = (_("Remote copy group %s is being failed over or failed " + "back. Unable to determine RCG location") % rcg_name) + LOG.error(msg) + raise exception.RcgStateInTransitionException(reason=msg) if local_rcg: if local_rcg['role'] == PRIMARY and not local_role_reversed: + LOG.info("Primary array is the active array") return self._primary_driver if remote_rcg: if remote_rcg['role'] == PRIMARY and remote_role_reversed: + LOG.info("Secondary array is the active array") return self._remote_driver + msg = (_("Failed to get RCG %s. 
Unable to determine RCG location") + % rcg_name) + LOG.error(msg) + raise exception.HPEDriverRemoteCopyGroupNotFound(name=rcg_name) + @synchronization.synchronized_volume('{volname}') def unmount_volume(self, volname, vol_mount, mount_id): vol = self._etcd.get_vol_byname(volname) @@ -1493,6 +1776,9 @@ def unmount_volume(self, volname, vol_mount, mount_id): volid = vol['id'] is_snap = vol['is_snap'] + path_info = None + node_owns_volume = True + # Start of volume fencing LOG.info('Unmounting volume: %s' % vol) if 'node_mount_info' in vol: @@ -1509,48 +1795,80 @@ def unmount_volume(self, volname, vol_mount, mount_id): # by some other node, it can go to that different ETCD root to # fetch the volume meta-data and do the cleanup. if self._node_id not in node_mount_info: - return json.dumps({u"Err": "Volume '%s' is mounted on another" - " node. Cannot unmount it!" % - volname}) - - LOG.info("node_id '%s' is present in vol mount info" - % self._node_id) - - mount_id_list = node_mount_info[self._node_id] - - LOG.info("Current mount_id_list %s " % mount_id_list) - - try: - mount_id_list.remove(mount_id) - except ValueError as ex: - pass + if 'old_path_info' in vol: + LOG.info("Old path info present in volume: %s" + % path_info) + for pi in vol['old_path_info']: + node_id = pi[0] + if node_id == self._node_id: + LOG.info("Found matching old path info for old " + "node ID: %s" % six.text_type(pi)) + path_info = pi + node_owns_volume = False + break + + if path_info: + LOG.info("Removing old path info for node %s from ETCD " + "volume meta-data..." % self._node_id) + vol['old_path_info'].remove(path_info) + if len(vol['old_path_info']) == 0: + LOG.info("Last old_path_info found. " + "Removing it too...") + vol.pop('old_path_info') + LOG.info("Updating volume meta-data: %s..." 
% vol) + self._etcd.save_vol(vol) + LOG.info("Volume meta-data updated: %s" % vol) + + path_info = json.loads(path_info[1]) + LOG.info("Cleaning up devices using old_path_info: %s" + % path_info) + else: + LOG.info("Volume '%s' is mounted on another node. " + "No old_path_info is present on ETCD. Unable" + "to cleanup devices!" % volname) + return json.dumps({u"Err": ""}) + else: + LOG.info("node_id '%s' is present in vol mount info" + % self._node_id) - LOG.info("Updating node_mount_info '%s' in etcd..." - % node_mount_info) - # Update the mount_id list in etcd - self._etcd.update_vol(volid, 'node_mount_info', - node_mount_info) + mount_id_list = node_mount_info[self._node_id] - LOG.info("Updated node_mount_info '%s' in etcd!" - % node_mount_info) + LOG.info("Current mount_id_list %s " % mount_id_list) - if len(mount_id_list) > 0: - # Don't proceed with unmount - LOG.info("Volume still in use by %s containers... " - "no unmounting done!" % len(mount_id_list)) - return json.dumps({u"Err": ''}) - else: - # delete the node_id key from node_mount_info - LOG.info("Removing node_mount_info %s", - node_mount_info) - vol.pop('node_mount_info') - LOG.info("Saving volume to etcd: %s..." % vol) - self._etcd.save_vol(vol) - LOG.info("Volume saved to etcd: %s!" % vol) + try: + mount_id_list.remove(mount_id) + except ValueError as ex: + LOG.exception('Ignoring exception: %s' % ex) + pass + + LOG.info("Updating node_mount_info '%s' in etcd..." + % node_mount_info) + # Update the mount_id list in etcd + self._etcd.update_vol(volid, 'node_mount_info', + node_mount_info) + + LOG.info("Updated node_mount_info '%s' in etcd!" + % node_mount_info) + + if len(mount_id_list) > 0: + # Don't proceed with unmount + LOG.info("Volume still in use by %s containers... " + "no unmounting done!" 
% len(mount_id_list)) + return json.dumps({u"Err": ''}) + else: + # delete the node_id key from node_mount_info + LOG.info("Removing node_mount_info %s", + node_mount_info) + vol.pop('node_mount_info') + LOG.info("Saving volume to etcd: %s..." % vol) + self._etcd.save_vol(vol) + LOG.info("Volume saved to etcd: %s!" % vol) # TODO: Requirement #5 will bring the flow here but the below flow # may result into exception. Need to ensure it doesn't happen - path_info = self._etcd.get_vol_path_info(volname) + if not path_info: + path_info = self._etcd.get_vol_path_info(volname) + # path_info = vol.get('path_info', None) if path_info: path_name = path_info['path'] @@ -1627,7 +1945,8 @@ def _unmount_volume(driver): # TODO: Create path_info list as we can mount the volume to multiple # hosts at the same time. # If this node owns the volume then update path_info - self._etcd.update_vol(volid, 'path_info', None) + if node_owns_volume: + self._etcd.update_vol(volid, 'path_info', None) LOG.info(_LI('path for volume: %(name)s, was successfully removed: ' '%(path_name)s'), {'name': volname, @@ -1699,13 +2018,22 @@ def _set_flash_cache_for_volume(self, vvs_name, flash_cache): @staticmethod def _rollback(rollback_list): + LOG.info("Rolling back...") for undo_action in reversed(rollback_list): LOG.info(undo_action['msg']) try: - undo_action['undo_func'](**undo_action['params']) + params = undo_action['params'] + if type(params) is dict: + undo_action['undo_func'](**undo_action['params']) + elif type(params) is tuple: + undo_action['undo_func'](*undo_action['params']) + else: + undo_action['undo_func'](undo_action['params']) except Exception as ex: # TODO: Implement retry logic + LOG.warning('Ignoring exception: %s' % six.text_type(ex)) pass + LOG.info("Roll back complete!") @staticmethod def _get_snapshot_by_name(snapshots, snapname): @@ -1743,12 +2071,24 @@ def _sync_snapshots_from_array(self, vol_id, db_snapshots, snap_cpg): self._etcd.update_vol(vol_id, 'snapshots', db_snapshots) + 
@staticmethod + def _get_required_rcg_field(rcg_detail): + rcg_filter = {} + + msg = 'get_required_rcg_field: %s' % rcg_detail + LOG.info(msg) + rcg_filter['rcg_name'] = rcg_detail.get('name') + # TODO(sonivi): handle in case of multiple target + rcg_filter['policies'] = rcg_detail['targets'][0].get('policies') + rcg_filter['role'] = volume.RCG_ROLE.get(rcg_detail.get('role')) + + return rcg_filter + @staticmethod def _get_required_qos_field(qos_detail): qos_filter = {} - msg = (_LI('get_required_qos_field: %(qos_detail)s'), - {'qos_detail': qos_detail}) + msg = 'get_required_qos_field: %s' % qos_detail LOG.info(msg) qos_filter['enabled'] = qos_detail.get('enabled') @@ -1817,43 +2157,3 @@ def _add_volume_to_rcg(self, vol, rcg_name, undo_steps): 'rcg_name': rcg_name}, 'msg': 'Removing VV %s from Remote Copy Group %s...' % (bkend_vol_name, rcg_name)}) - - def _decrypt_password(self, src_bknd, trgt_bknd, backend_name): - try: - passphrase = self._etcd.get_backend_key(backend_name) - except Exception as ex: - LOG.info("Using Plain Text") - else: - passphrase = self.key_check(passphrase) - src_bknd.hpe3par_password = \ - self._decrypt(src_bknd.hpe3par_password, passphrase) - src_bknd.san_password = \ - self._decrypt(src_bknd.san_password, passphrase) - if trgt_bknd: - trgt_bknd.hpe3par_password = \ - self._decrypt(trgt_bknd.hpe3par_password, passphrase) - trgt_bknd.san_password = \ - self._decrypt(trgt_bknd.san_password, passphrase) - - def key_check(self, key): - KEY_LEN = len(key) - padding_string = string.ascii_letters - - if KEY_LEN < 16: - KEY = key + padding_string[:16 - KEY_LEN] - - elif KEY_LEN > 16 and KEY_LEN < 24: - KEY = key + padding_string[:24 - KEY_LEN] - - elif KEY_LEN > 24 and KEY_LEN < 32: - KEY = key + padding_string[:32 - KEY_LEN] - - elif KEY_LEN > 32: - KEY = key[:32] - - return KEY - - def _decrypt(self, encrypted, passphrase): - aes = AES.new(passphrase, AES.MODE_CFB, '1234567812345678') - decrypt_pass = aes.decrypt(base64.b64decode(encrypted)) - 
return decrypt_pass.decode('utf-8') diff --git a/patch_3par_client/python_3parclient-4.2.9-py3.5.egg b/patch_3par_client/python_3parclient-4.2.9-py3.5.egg new file mode 100644 index 00000000..7262dc6f Binary files /dev/null and b/patch_3par_client/python_3parclient-4.2.9-py3.5.egg differ diff --git a/patch_os_bricks/linuxscsi.py b/patch_os_bricks/linuxscsi.py index dc191891..5db2f9ec 100644 --- a/patch_os_bricks/linuxscsi.py +++ b/patch_os_bricks/linuxscsi.py @@ -256,15 +256,6 @@ def find_multipath_device_path(self, wwn): except exception.VolumeDeviceNotFound: pass - LOG.info('checking for by-id/scsi-wwn entry for multipath') - path = "/dev/disk/by-id/scsi-%(wwn)s" % wwn_dict - try: - self.wait_for_path(path) - return path - except exception.VolumeDeviceNotFound: - pass - - # couldn't find a path LOG.warning("couldn't find a valid multipath device path for " "%(wwn)s", wwn_dict) diff --git a/plugin-start b/plugin-start index 978484eb..e6e1bade 100755 --- a/plugin-start +++ b/plugin-start @@ -3,6 +3,8 @@ /sbin/multipathd /sbin/udevd & +export PYTHONPATH=${HOME}/python-hpedockerplugin:/root/python-hpedockerplugin:/python-hpedockerplugin:/python-hpedockerplugin/hpedockerplugin + # Perform cleanup of twistd.pid, hpe.sock.* files /usr/bin/cleanup.sh /usr/bin/twistd --nodaemon hpe_plugin_service diff --git a/quick-start/README.md b/quick-start/README.md index 617fbe98..25b2617f 100644 --- a/quick-start/README.md +++ b/quick-start/README.md @@ -180,7 +180,7 @@ or `` /var/log/messages `` - For upgrading the plugin from older version 2.0 or 2.0.2 to 2.1 user needs to unmount all the volumes and follow the standard upgrade procedure described in docker guide. 
-- Volumes created using older plugins (2.0.2 or below) do not have snp_cpg associated with them, hence when the plugin is upgraded to 2.1 and user wants to perform clone/snapshot operations on these old volumes, he/she must set the snap_cpg for the +- Volumes created using older plugins (2.0.2 or below) do not have snap_cpg associated with them, hence when the plugin is upgraded to 2.1 and user wants to perform clone/snapshot operations on these old volumes, he/she must set the snap_cpg for the corresponding volumes using 3par cli or any tool before performing clone/snapshot operations. - While inspecting a snapshot, its provisioning field is set to that of parent volume's provisioning type. In 3PAR however, it is shown as 'snp'. diff --git a/requirements-py3.txt b/requirements-py3.txt index d4dedbdc..c7419e4e 100644 --- a/requirements-py3.txt +++ b/requirements-py3.txt @@ -3,6 +3,7 @@ asn1crypto==0.24.0 attrs==18.1.0 Automat==0.7.0 Babel==2.6.0 +backoff==1.8.0 bcrypt==3.1.4 cachetools==2.1.0 certifi==2018.4.16 @@ -63,7 +64,7 @@ pycrypto==2.6.1 pyinotify==0.9.6 PyNaCl==1.2.1 pyparsing==2.2.0 -python-3parclient==4.2.7 +python-3parclient==4.2.10 python-dateutil==2.7.3 python-etcd==0.4.5 python-lefthandclient==2.1.0 @@ -74,11 +75,12 @@ requests==2.19.1 retrying==1.3.3 rfc3986==1.1.0 Routes==2.4.1 -setuptools +setuptools==41.0.0 sh==1.12.14 six==1.11.0 statsd==3.2.2 stevedore==1.28.0 +ratelimit==2.2.1 tenacity==4.12.0 Twisted==18.7.0rc1 urllib3==1.23 diff --git a/test-requirements.txt b/test-requirements.txt index 8e4d6efb..69f3336e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,5 @@ -flake8 +flake8==3.5.0 testtools mock==2.0.0 +ratelimit==2.2.1 +backoff==1.8.0 diff --git a/test/clonevolume_tester.py b/test/clonevolume_tester.py index ad771ff0..1cd1d4e4 100644 --- a/test/clonevolume_tester.py +++ b/test/clonevolume_tester.py @@ -24,11 +24,17 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = 
self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} + mock_3parclient.findVolumeSet.return_value = None class TestCloneDefaultEtcdSaveFails(CloneVolumeUnitTest): @@ -38,12 +44,17 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] # Make save_vol fail with exception mock_etcd.save_vol.side_effect = [Exception("I am dead")] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} def check_response(self, resp): @@ -64,7 +75,6 @@ def check_response(self, resp): mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.createVolume.assert_called() mock_3parclient.copyVolume.assert_called() - mock_3parclient.getTask.assert_called() mock_3parclient.modifyVolume.assert_called() def get_request_params(self): @@ -76,44 +86,44 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} mock_3parclient.getTask.return_value = {'status': data.TASK_DONE} -# Make copyVolume 
operation fail -class TestCloneOfflineCopyFails(CloneVolumeUnitTest): +# Offline copy +class TestCloneFromBaseVolumeActiveTask(CloneVolumeUnitTest): def check_response(self, resp): - # Match error substring with returned error string - err_received = resp['Err'] - err_expected = 'copy volume task failed: create_cloned_volume' - self._test_case.assertIn(err_expected, err_received) - - # Check following 3PAR APIs were invoked - mock_3parclient = self.mock_objects['mock_3parclient'] - mock_3parclient.createVolume.assert_called() - mock_3parclient.copyVolume.assert_called() - mock_3parclient.getTask.assert_called() + self._test_case.assertNotEqual(resp, {u"Err": ''}) def get_request_params(self): return {"Name": "clone-vol-001", "Opts": {"cloneOf": data.VOLUME_NAME, # Difference in size of source and cloned volume - # triggers offline copy. Src size is 2. + # triggers offline copy. Src volume size is 2. "size": 20}} def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] mock_3parclient = self.mock_objects['mock_3parclient'] - mock_3parclient.getCPG.return_value = {} mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} - # TASK_FAILED simulates failure of copyVolume() operation - mock_3parclient.getTask.return_value = {'status': data.TASK_FAILED} + mock_3parclient.isOnlinePhysicalCopy.return_value = True + mock_3parclient.getCPG.return_value = {} + mock_3parclient.getTask.return_value = {'status': data.TASK_DONE} class TestCloneInvalidSourceVolume(CloneVolumeUnitTest): @@ -146,7 +156,13 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] # Source volume that is to be cloned - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] + mock_3parclient = 
self.mock_objects['mock_3parclient'] + mock_3parclient.isOnlinePhysicalCopy.return_value = False def check_response(self, resp): expected_msg = "clone volume size 1 is less than source volume size 2" @@ -170,10 +186,15 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_dedup + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_dedup, + data.volume_dedup + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} @@ -194,10 +215,15 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_flash_cache + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_flash_cache, + data.volume_flash_cache + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} @@ -216,11 +242,17 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_qos + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_qos, + data.volume_qos + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} + mock_3parclient.findVolumeSet.return_value = None # Online copy with flash cache - add to vvset fails @@ -246,11 +278,16 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = 
data.volume_flash_cache + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_flash_cache, + data.volume_flash_cache, + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} mock_3parclient.getCPG.return_value = {} + mock_3parclient.isOnlinePhysicalCopy.return_value = False # Make addVolumeToVolumeSet fail by throwing exception mock_3parclient.addVolumeToVolumeSet.side_effect = \ [exceptions.HTTPNotFound('fake')] @@ -280,12 +317,17 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_flash_cache + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_flash_cache, + data.volume_flash_cache + ] mock_etcd.save_vol.side_effect = \ [hpe_exc.HPEPluginSaveFailed(obj='clone-vol-001')] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} @@ -310,11 +352,16 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_flash_cache + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_flash_cache, + data.volume_flash_cache, + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} mock_3parclient.getCPG.return_value = {} + mock_3parclient.isOnlinePhysicalCopy.return_value = False # Make addVolumeToVolumeSet fail by throwing exception mock_3parclient.modifyVolumeSet.side_effect = [ exceptions.HTTPInternalServerError("Internal server error") @@ -343,21 +390,25 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = \ + mock_etcd.get_vol_byname.side_effect 
= [ + None, + data.volume_flash_cache_and_qos, data.volume_flash_cache_and_qos + ] mock_etcd.save_vol.side_effect = \ [hpe_exc.HPEPluginSaveFailed(obj='clone-vol-001')] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getCPG.return_value = {} # CHAP enabled makes Offline copy flow to execute class TestCloneWithCHAP(CloneVolumeUnitTest): def override_configuration(self, all_configs): - all_configs['DEFAULT'].hpe3par_iscsi_chap_enabled = True - all_configs['DEFAULT'].use_multipath = False + all_configs['block'][1]['DEFAULT'].hpe3par_iscsi_chap_enabled = True + all_configs['block'][1]['DEFAULT'].use_multipath = False def check_response(self, resp): self._test_case.assertEqual(resp, {u"Err": ''}) @@ -375,12 +426,17 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + data.volume + ] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} mock_3parclient.getCPG.return_value = {} mock_3parclient.getVolumeMetaData.return_value = {'value': True} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getTask.return_value = {'status': data.TASK_DONE} @@ -395,12 +451,15 @@ def check_response(self, resp): def get_request_params(self): return {"Name": "clone-vol-001", "Opts": {"cloneOf": data.VOLUME_NAME, - "compression": 'true', "size": '16'}} def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.volume_compression + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume_compression, + data.volume_compression + ] mock_3parclient = self.mock_objects['mock_3parclient'] 
mock_3parclient.getWsApiVersion.return_value = \ @@ -411,5 +470,29 @@ def setup_mock_objects(self): 'revision': 0} mock_3parclient.copyVolume.return_value = {'taskid': data.TASK_ID} mock_3parclient.getCPG.return_value = {} + mock_3parclient.isOnlinePhysicalCopy.return_value = False mock_3parclient.getStorageSystemInfo.return_value = \ {'licenseInfo': {'licenses': [{'name': 'Compression'}]}} + + +class TestCloneVolumeWithInvalidOptions(CloneVolumeUnitTest): + def check_response(self, resp): + expected_error_msg = "Invalid input received: Invalid option(s) " \ + "['provisioning', 'qos-name'] specified for " \ + "operation clone volume. Please check help " \ + "for usage." + self._test_case.assertEqual(expected_error_msg, resp['Err']) + + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {"qos-name": "soni_vvset", + "provisioning": "thin", + "size": "2", + "cloneOf": "clone_of"}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = None + + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getCPG.return_value = {} diff --git a/test/config/hpe_file.conf b/test/config/hpe_file.conf new file mode 100644 index 00000000..c6168906 --- /dev/null +++ b/test/config/hpe_file.conf @@ -0,0 +1,47 @@ +[DEFAULT] +ssh_hosts_key_file = /root/.ssh/known_hosts +host_etcd_ip_address = 192.168.68.36 +host_etcd_port_number = 2379 +#host_etcd_client_cert = /root/plugin/certs/.pem +#host_etcd_client_key = /root/plugin/certs/.pem + +# OSLO based Logging level for the plugin. 
+logging = DEBUG + +# Enable 3PAR client debug messages +hpe3par_debug = True + +# Suppress Requests Library SSL warnings +suppress_requests_ssl_warnings = True + +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver + +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.7 +san_login = 3paradm +san_password = 3pardata +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r1 +# hpe3par_snapcpg is optional. If not provided, it defaults to hpe3par_cpg value +use_multipath = True +enforce_multipath = True + +[DEFAULT_FILE] +ssh_hosts_key_file = /root/.ssh/known_hosts +host_etcd_ip_address = 192.168.68.36 +host_etcd_port_number = 2379 +logging = DEBUG +hpe3par_debug = True +suppress_requests_ssl_warnings = True +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_file.HPE3PARFileDriver +hpe3par_api_url = https://192.168.67.6:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.6 +san_login = 3paradm +san_password = 3pardata +hpe3par_cpg = fs_cpg +hpe3par_server_ip_pool = 192.168.98.8-192.168.98.13:255.255.192.0 diff --git a/test/config/hpe_mixed_fc_default.conf b/test/config/hpe_mixed_fc_default.conf new file mode 100644 index 00000000..4ec7d4b2 --- /dev/null +++ b/test/config/hpe_mixed_fc_default.conf @@ -0,0 +1,64 @@ +[DEFAULT] +ssh_hosts_key_file = /root/.ssh/known_hosts +host_etcd_ip_address = 192.168.68.36 +host_etcd_port_number = 2379 +#host_etcd_client_cert = /root/plugin/certs/.pem +#host_etcd_client_key = /root/plugin/certs/.pem + +# OSLO based Logging level for the plugin. 
+logging = DEBUG + +# Enable 3PAR client debug messages +hpe3par_debug = True + +# Suppress Requests Library SSL warnings +suppress_requests_ssl_warnings = True + +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.7 +san_login = 3paradm +san_password = 3pardata +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r1 +use_multipath = True +enforce_multipath = True +replication_device = backend_id:CSSOS-SSA05, + replication_mode:synchronous, + cpg_map:FC_r6:FC_r1, + snap_cpg_map:FC_r1:FC_r5, + hpe3par_api_url:https://192.168.67.5:8080/api/v1, + hpe3par_username:3paradm, + hpe3par_password:3pardata, + san_ip:192.168.67.5, + san_login:3paradm, + san_password:3pardata + +[3par_iscsi_rep] +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.7 +san_login = 3paradm +san_password = 3pardata +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r1 +hpe3par_iscsi_chap_enabled = True +hpe3par_iscsi_ips = 10.50.3.59 +replication_device = backend_id:CSSOS-SSA05, + quorum_witness_ip:dummy_ip, + replication_mode:synchronous, + cpg_map:FC_r6:FC_r1, + snap_cpg_map:FC_r1:FC_r5, + hpe3par_api_url:https://192.168.67.5:8080/api/v1, + hpe3par_username:3paradm, + hpe3par_password:3pardata, + san_ip:192.168.67.5, + san_login:3paradm, + san_password:3pardata, + hpe3par_iscsi_ips:10.50.3.59 diff --git a/test/config/hpe_mixed_iscsi_default.conf b/test/config/hpe_mixed_iscsi_default.conf new file mode 100644 index 00000000..b4b01fdb --- /dev/null +++ b/test/config/hpe_mixed_iscsi_default.conf @@ -0,0 +1,64 @@ +[DEFAULT] +ssh_hosts_key_file = /root/.ssh/known_hosts +host_etcd_ip_address = 192.168.68.36 +host_etcd_port_number = 2379 +#host_etcd_client_cert = 
/root/plugin/certs/.pem +#host_etcd_client_key = /root/plugin/certs/.pem + +# OSLO based Logging level for the plugin. +logging = DEBUG + +# Enable 3PAR client debug messages +hpe3par_debug = True + +# Suppress Requests Library SSL warnings +suppress_requests_ssl_warnings = True + +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.7 +san_login = 3paradm +san_password = 3pardata +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r1 +hpe3par_iscsi_chap_enabled = True +hpe3par_iscsi_ips = 10.50.3.59 +replication_device = backend_id:CSSOS-SSA05, + quorum_witness_ip:dummy_ip, + replication_mode:synchronous, + cpg_map:FC_r6:FC_r1, + snap_cpg_map:FC_r1:FC_r5, + hpe3par_api_url:https://192.168.67.5:8080/api/v1, + hpe3par_username:3paradm, + hpe3par_password:3pardata, + san_ip:192.168.67.5, + san_login:3paradm, + san_password:3pardata, + hpe3par_iscsi_ips:10.50.3.59 + +[3par_fc_rep] +hpedockerplugin_driver = hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver +hpe3par_api_url = https://192.168.67.7:8080/api/v1 +hpe3par_username = 3paradm +hpe3par_password = 3pardata +san_ip = 192.168.67.7 +san_login = 3paradm +san_password = 3pardata +san_password = 3pardata +hpe3par_cpg = FC_r6 +hpe3par_snapcpg = FC_r1 +use_multipath = True +enforce_multipath = True +replication_device = backend_id:CSSOS-SSA05, + replication_mode:synchronous, + cpg_map:FC_r6:FC_r1, + snap_cpg_map:FC_r1:FC_r5, + hpe3par_api_url:https://192.168.67.5:8080/api/v1, + hpe3par_username:3paradm, + hpe3par_password:3pardata, + san_ip:192.168.67.5, + san_login:3paradm, + san_password:3pardata diff --git a/test/createreplicatedvolume_tester.py b/test/createreplicatedvolume_tester.py index 2aa1ff58..01f80ff2 100644 --- a/test/createreplicatedvolume_tester.py +++ b/test/createreplicatedvolume_tester.py @@ -20,6 +20,25 @@ def 
override_configuration(self, all_configs): # here for the normal happy path TCs here as they are same +class TestCreateVolumeDefaultFails(CreateReplicatedVolumeUnitTest): + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = None + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.createVolume.assert_called() + + class TestCreateReplicatedVolumeAndRCG(CreateReplicatedVolumeUnitTest): def __init__(self, backend_name): self._backend_name = backend_name @@ -115,6 +134,23 @@ def check_response(self, resp): mock_3parclient.createRemoteCopyGroup.assert_called() +class TestCreateReplicatedVolumeWithInvalidOptions( + CreateReplicatedVolumeUnitTest): + def check_response(self, resp): + in_valid_opts = ['expHrs', 'retHrs'] + in_valid_opts.sort() + op = "create replicated volume" + expected = "Invalid input received: Invalid option(s) " \ + "%s specified for operation %s. " \ + "Please check help for usage." 
% (in_valid_opts, op) + self._test_case.assertEqual(expected, resp['Err']) + + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {"replicationGroup": "Dummy-RCG", + "expHrs": 111, + "retHrs": 123}} + # TODO: # class TestCreateVolumeWithMutuallyExclusiveList( # CreateReplicatedVolumeUnitTest): diff --git a/test/createshare_tester.py b/test/createshare_tester.py new file mode 100644 index 00000000..12343305 --- /dev/null +++ b/test/createshare_tester.py @@ -0,0 +1,691 @@ +import time + +from hpe3parclient import exceptions as hpe3par_ex + +import hpedockerplugin.exception as exception +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest + + +class CreateShareUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def _get_plugin_api(self): + return 'volumedriver_create' + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = None + + def override_configuration(self, all_configs): + pass + + # TODO: check_response and setup_mock_objects can be implemented + # here for the normal happy path TCs here as they are same + + +class TestCreateFirstDefaultShare(CreateShareUnitTest): + def get_request_params(self): + return {u"Name": u"MyDefShare_01", + u"Opts": {u"filePersona": u''}} + + def setup_mock_objects(self): + # ***** BEGIN - Required mock objects ***** + mock_etcd = self.mock_objects['mock_etcd'] + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_fp_etcd = self.mock_objects['mock_fp_etcd'] + mock_file_client = self.mock_objects['mock_file_client'] + # ***** END - Required mock objects ***** + + # ***** BEGIN - Setup side effect lists ***** + etcd_get_share_side_effect = list() + mock_share_etcd.get_share.side_effect = etcd_get_share_side_effect + + etcd_get_backend_metadata_side_effect = list() + mock_fp_etcd.get_backend_metadata.side_effect = \ + etcd_get_backend_metadata_side_effect + + etcd_get_fpg_metadata_side_effect = 
list() + mock_fp_etcd.get_fpg_metadata.side_effect = \ + etcd_get_fpg_metadata_side_effect + + file_client_http_post_side_effect = list() + mock_file_client.http.post.side_effect = \ + file_client_http_post_side_effect + + file_client_get_task_side_effect = list() + mock_file_client.getTask.side_effect = \ + file_client_get_task_side_effect + + file_client_http_get_side_effect = list() + mock_file_client.http.get.side_effect = \ + file_client_http_get_side_effect + # ***** END - Setup side effect lists ***** + + # Step #1: + # Skip check for volume existence <-- REST layer + + # Step #0: + # Skip check for volume existence <-- REST LAYER + mock_etcd.get_vol_byname.return_value = None + + # Step #1: + # Skip check for share existence <-- REST LAYER + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #2: + # Skip check for share existence <-- File Mgr + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #3: + # Get current default FPG. No backend metadata exists + # This will result in EtcdDefaultFpgNotPresent exception + # which will execute _create_default_fpg flow which tries + # to generate default FPG/VFS names using backend metadata + etcd_get_backend_metadata_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #4: + # _create_default_fpg flow tries to generate default FPG/VFS + # names using backend metadata. For first share, no backend + # metadata exists which results in EtcdMetadataNotFound. As a + # result, backend metadata is CREATED: + # { + # 'ips_in_use': [], + # 'ips_locked_for_use': [], + # 'counter': 0 + # } + # DockerFpg_0 and DockerVFS_0 names are returned for creation. + etcd_get_backend_metadata_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #5: + # Create FPG DockerFpg_0 at the backend. This results in 3PAR + # task creation with taskId present in fpg_create_response. 
Wait + # for task completion in step #6 below + file_client_http_post_side_effect.append( + (data.fpg_create_resp, data.fpg_create_body) + ) + # Step #6: + # Wait for task completion and add default_fpg to backend + # metadata as below: + # { + # 'default_fpgs': {cpg_name: ['Docker_Fpg0']}, + # 'ips_in_use': [], + # 'ips_locked_for_use': [], + # 'counter': 0 + # } + # Save FPG metadata as well + file_client_get_task_side_effect.append( + data.fpg_create_task_body + ) + # Step #7: + # Claim available IP + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #8: + # Get all VFS to check IPs in use + file_client_http_get_side_effect.append( + (data.all_vfs_resp, data.all_vfs_body) + ) + # Step #9: + # Create VFS + file_client_http_post_side_effect.append( + (data.vfs_create_resp, data.vfs_create_body) + ) + # Step #10: + # Wait for VFS create task completion + file_client_get_task_side_effect.append( + data.vfs_create_task_body + ) + mock_file_client.TASK_DONE = 1 + + # Step #11: + # Allow IP info to be updated by returning empty dict + # This brings VFS creation process to completion + etcd_get_fpg_metadata_side_effect.append({}) + + # Step #12: + # Allow marking of IP to be in use + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #13: + # Create share response and body + file_client_http_post_side_effect.append( + (data.sh_create_resp, data.sh_create_body) + ) + # Step #14: + # Set quota + file_client_http_post_side_effect.append( + (data.set_quota_resp, data.set_quota_body) + ) + # Step #15: + # Verify VFS is in good state + file_client_http_get_side_effect.append( + (data.get_vfs_resp, data.get_vfs_body) + ) + # Step #16: + # Allow marking of IP to be in use + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #17: + # Allow quota_id to be updated in share + etcd_get_share_side_effect.append( + data.create_share_args + 
) + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + for i in range(1, 3): + status = data.create_share_args.get('status') + if status == 'AVAILABLE' or status == 'FAILED': + print("Share is in %s state!" % status) + break + else: + print("Share is in %s state. Checking in few seconds " + "again..." % status) + time.sleep(2) + + +class TestCreateSecondDefaultShare(CreateShareUnitTest): + def get_request_params(self): + return {u"Name": u"MyDefShare_01", + u"Opts": {u"filePersona": u''}} + + def setup_mock_objects(self): + + # ***** BEGIN - Required mock objects ***** + mock_etcd = self.mock_objects['mock_etcd'] + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_fp_etcd = self.mock_objects['mock_fp_etcd'] + mock_file_client = self.mock_objects['mock_file_client'] + # ***** END - Required mock objects ***** + + # ***** BEGIN - Setup side effect lists ***** + etcd_get_share_side_effect = list() + mock_share_etcd.get_share.side_effect = etcd_get_share_side_effect + + etcd_get_backend_metadata_side_effect = list() + mock_fp_etcd.get_backend_metadata.side_effect = \ + etcd_get_backend_metadata_side_effect + + file_client_http_post_side_effect = list() + mock_file_client.http.post.side_effect = \ + file_client_http_post_side_effect + + file_client_get_task_side_effect = list() + mock_file_client.getTask.side_effect = \ + file_client_get_task_side_effect + + file_client_http_get_side_effect = list() + mock_file_client.http.get.side_effect = \ + file_client_http_get_side_effect + # ***** END - Setup side effect lists ***** + + # Step #1: + # Skip check for volume existence <-- REST layer + mock_etcd.get_vol_byname.return_value = None + + # Step #2: + # Skip check for share existence <-- REST LAYER + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #3: Skip check for share existence <-- File Mgr + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key 
not found") + ) + # Step #4: + # Get current default FPG. Backend metadata exists. FPG info + # needs to be prepared in the below format and returned. For + # this, step #5, #6 and #7 needs to be executed: + # fpg_info = { + # 'ips': {netmask: [ip]}, + # 'fpg': fpg_name, + # 'vfs': vfs_name, + # } + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #5: + # Get FPG from the backend so that its total capacity can + # be ascertained and checked against sum of sizes of shares + # existing on this FPG to find out if a new share with the + # specified/default size can be accommodated on this FPG + file_client_http_get_side_effect.append( + (data.resp, data.bkend_fpg) + ) + # Step #6: + # Get all quotas set for the file-stores under the current FPG + file_client_http_get_side_effect.append( + (data.resp, data.get_quotas_for_fpg) + ) + # Step #7: + # Get VFS corresponding the the FPG so that IP and netmask can be + # set within the FPG info being returned + file_client_http_get_side_effect.append( + (data.get_vfs_resp, data.get_vfs_body) + ) + # Step #8: + # Create share response and body + file_client_http_post_side_effect.append( + (data.sh_create_resp, data.sh_create_body) + ) + # Step #9: + # Set quota + file_client_http_post_side_effect.append( + (data.set_quota_resp, data.set_quota_body) + ) + # Step #10: + # Allow quota_id to be updated in share + etcd_get_share_side_effect.append( + data.create_share_args, + ) + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + for i in range(1, 3): + status = data.create_share_args.get('status') + if status == 'AVAILABLE' or status == 'FAILED': + print("Share is in %s state!" % status) + break + else: + print("Share is in %s state. Checking in few seconds " + "again..." 
% status) + time.sleep(2) + + +class TestCreateShareOnNewFpg(CreateShareUnitTest): + def get_request_params(self): + return {u"Name": u"MyDefShare_01", + u"Opts": {u"filePersona": u"", + u"fpg": u"NewFpg"}} + + def setup_mock_objects(self): + + # ***** BEGIN - Required mock objects ***** + mock_etcd = self.mock_objects['mock_etcd'] + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_fp_etcd = self.mock_objects['mock_fp_etcd'] + mock_file_client = self.mock_objects['mock_file_client'] + # ***** END - Required mock objects ***** + + # ***** BEGIN - Setup side effect lists ***** + etcd_get_share_side_effect = list() + mock_share_etcd.get_share.side_effect = etcd_get_share_side_effect + + etcd_get_backend_metadata_side_effect = list() + mock_fp_etcd.get_backend_metadata.side_effect = \ + etcd_get_backend_metadata_side_effect + + etcd_get_fpg_metadata_side_effect = list() + mock_fp_etcd.get_fpg_metadata.side_effect = \ + etcd_get_fpg_metadata_side_effect + + file_client_http_post_side_effect = list() + mock_file_client.http.post.side_effect = \ + file_client_http_post_side_effect + + file_client_get_task_side_effect = list() + mock_file_client.getTask.side_effect = \ + file_client_get_task_side_effect + + file_client_http_get_side_effect = list() + mock_file_client.http.get.side_effect = \ + file_client_http_get_side_effect + # ***** END - Setup side effect lists ***** + + # Step #1: + # Skip check for volume existence <-- REST layer + mock_etcd.get_vol_byname.return_value = None + + # Step #2: + # Skip check for share existence <-- REST LAYER + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #3: + # Skip check for share existence <-- File Mgr + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + + # Step #4: + # No FPG metadata for specified FPG name present in ETCD + etcd_get_fpg_metadata_side_effect.append( + exception.EtcdMetadataNotFound + ) + + # Step #5: + 
# Get FPG from backend + file_client_http_get_side_effect.append( + (data.no_fpg_resp, data.no_fpg_body) + ) + + # Step #6: + # Get all quotas for the specified FPG + file_client_http_get_side_effect.append( + (data.resp, data.get_quotas_for_fpg) + ) + + # Step #7: + # Get VFS for the specified FPG so that IP information can + # be added to the share metadata + file_client_http_get_side_effect.append( + (data.get_vfs_resp, data.get_vfs_body) + ) + + # Step #8: + # Create share response and body + file_client_http_post_side_effect.append( + (data.sh_create_resp, data.sh_create_body) + ) + # Step #9: + # Set quota + file_client_http_post_side_effect.append( + (data.set_quota_resp, data.set_quota_body) + ) + # Step #10: + # Allow quota_id to be updated in share + etcd_get_share_side_effect.append( + data.create_share_args, + ) + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + + +class TestCreateShareOnLegacyFpg(CreateShareUnitTest): + def get_request_params(self): + return {u"Name": u"MyDefShare_01", + u"Opts": {u"filePersona": u"", + u"fpg": u"LegacyFpg"}} + + def setup_mock_objects(self): + + # ***** BEGIN - Required mock objects ***** + mock_etcd = self.mock_objects['mock_etcd'] + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_fp_etcd = self.mock_objects['mock_fp_etcd'] + mock_file_client = self.mock_objects['mock_file_client'] + # ***** END - Required mock objects ***** + + # ***** BEGIN - Setup side effect lists ***** + etcd_get_share_side_effect = list() + mock_share_etcd.get_share.side_effect = etcd_get_share_side_effect + + etcd_get_backend_metadata_side_effect = list() + mock_fp_etcd.get_backend_metadata.side_effect = \ + etcd_get_backend_metadata_side_effect + + etcd_get_fpg_metadata_side_effect = list() + 
mock_fp_etcd.get_fpg_metadata.side_effect = \ + etcd_get_fpg_metadata_side_effect + + file_client_http_post_side_effect = list() + mock_file_client.http.post.side_effect = \ + file_client_http_post_side_effect + + file_client_get_task_side_effect = list() + mock_file_client.getTask.side_effect = \ + file_client_get_task_side_effect + + file_client_http_get_side_effect = list() + mock_file_client.http.get.side_effect = \ + file_client_http_get_side_effect + # ***** END - Setup side effect lists ***** + + # Step #1: + # Skip check for volume existence <-- REST layer + mock_etcd.get_vol_byname.return_value = None + + # Step #2: + # Skip check for share existence <-- REST LAYER + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #3: + # Skip check for share existence <-- File Mgr + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + + # Step #4: + # No FPG metadata for specified FPG name present in ETCD + etcd_get_fpg_metadata_side_effect.append( + exception.EtcdMetadataNotFound + ) + + # Step #5: + # Return legacy FPG from backend + file_client_http_get_side_effect.append( + (data.resp, data.bkend_fpg) + ) + + # Step #6: + # Get all quotas for the specified FPG + file_client_http_get_side_effect.append( + (data.resp, data.get_quotas_for_fpg) + ) + + # Step #7: + # Get VFS for the specified FPG so that IP information can + # be added to the share metadata + file_client_http_get_side_effect.append( + (data.get_vfs_resp, data.get_vfs_body) + ) + + # Step #8: + # Create share response and body + file_client_http_post_side_effect.append( + (data.sh_create_resp, data.sh_create_body) + ) + # Step #9: + # Set quota + file_client_http_post_side_effect.append( + (data.set_quota_resp, data.set_quota_body) + ) + # Step #10: + # Allow quota_id to be updated in share + etcd_get_share_side_effect.append( + data.create_share_args, + ) + + def check_response(self, resp): + 
self._test_case.assertEqual(resp, {u"Err": ''}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + + +class TestCreateFirstDefaultShareSetQuotaFails(CreateShareUnitTest): + def get_request_params(self): + return {u"Name": u"MyDefShare_01", + u"Opts": {u"filePersona": u''}} + + def setup_mock_objects(self): + # ***** BEGIN - Required mock objects ***** + mock_etcd = self.mock_objects['mock_etcd'] + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_fp_etcd = self.mock_objects['mock_fp_etcd'] + mock_file_client = self.mock_objects['mock_file_client'] + # ***** END - Required mock objects ***** + + # ***** BEGIN - Setup side effect lists ***** + etcd_get_share_side_effect = list() + mock_share_etcd.get_share.side_effect = etcd_get_share_side_effect + + etcd_get_backend_metadata_side_effect = list() + mock_fp_etcd.get_backend_metadata.side_effect = \ + etcd_get_backend_metadata_side_effect + + etcd_get_fpg_metadata_side_effect = list() + mock_fp_etcd.get_fpg_metadata.side_effect = \ + etcd_get_fpg_metadata_side_effect + + file_client_http_post_side_effect = list() + mock_file_client.http.post.side_effect = \ + file_client_http_post_side_effect + + file_client_get_task_side_effect = list() + mock_file_client.getTask.side_effect = \ + file_client_get_task_side_effect + + file_client_http_get_side_effect = list() + mock_file_client.http.get.side_effect = \ + file_client_http_get_side_effect + # ***** END - Setup side effect lists ***** + + # Step #0: + # Skip check for volume existence <-- REST LAYER + mock_etcd.get_vol_byname.return_value = None + + # Step #1: + # Skip check for share existence <-- REST LAYER + etcd_get_share_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #2: + # Skip check for share existence <-- File Mgr + etcd_get_share_side_effect.append( + 
exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #3: + # Get current default FPG. No backend metadata exists + # This will result in EtcdDefaultFpgNotPresent exception + # which will execute _create_default_fpg flow which tries + # to generate default FPG/VFS names using backend metadata + etcd_get_backend_metadata_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #4: + # _create_default_fpg flow tries to generate default FPG/VFS + # names using backend metadata. For first share, no backend + # metadata exists which results in EtcdMetadataNotFound. As a + # result, backend metadata is CREATED: + # { + # 'ips_in_use': [], + # 'ips_locked_for_use': [], + # 'counter': 0 + # } + # DockerFpg_0 and DockerVFS_0 names are returned for creation. + etcd_get_backend_metadata_side_effect.append( + exception.EtcdMetadataNotFound(msg="Key not found") + ) + # Step #5: + # Create FPG DockerFpg_0 at the backend. This results in 3PAR + # task creation with taskId present in fpg_create_response. 
Wait + # for task completion in step #6 below + file_client_http_post_side_effect.append( + (data.fpg_create_resp, data.fpg_create_body) + ) + # Step #6: + # Wait for task completion and add default_fpg to backend + # metadata as below: + # { + # 'default_fpgs': {cpg_name: ['Docker_Fpg0']}, + # 'ips_in_use': [], + # 'ips_locked_for_use': [], + # 'counter': 0 + # } + # Save FPG metadata as well + file_client_get_task_side_effect.append( + data.fpg_create_task_body + ) + # Step #7: + # Claim available IP + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #8: + # Get all VFS to check IPs in use + file_client_http_get_side_effect.append( + (data.all_vfs_resp, data.all_vfs_body) + ) + # Step #9: + # Create VFS + file_client_http_post_side_effect.append( + (data.vfs_create_resp, data.vfs_create_body) + ) + # Step #10: + # Wait for VFS create task completion + file_client_get_task_side_effect.append( + data.vfs_create_task_body + ) + mock_file_client.TASK_DONE = 1 + + # Step #11: + # Verify VFS is in good state + file_client_http_get_side_effect.append( + (data.get_vfs_resp, data.get_vfs_body) + ) + + # Step #12: + # Allow IP info to be updated by returning empty dict + # This brings VFS creation process to completion + etcd_get_fpg_metadata_side_effect.append({}) + + # Step #13: + # Allow marking of IP to be in use + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #14: + # Allow marking of IP to be in use + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg + ) + # Step #15: + # Create share response and body + file_client_http_post_side_effect.append( + (data.sh_create_resp, data.sh_create_body) + ) + # Step #16: + # Set quota FAILS + file_client_http_post_side_effect.append( + hpe3par_ex.HTTPBadRequest("Set Quota Failed") + ) + # Step #17: + # Delete file store requires its ID. 
Query file store + # by name + file_client_http_get_side_effect.append( + (data.get_fstore_resp, data.get_fstore_body) + ) + # Step #18: + # IP marked for use to be returned to IP pool as part of rollback + # Return backend metadata that has the IPs in use + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg_and_ips + ) + # Step #19: + # To delete backend FPG, get FPG by name to retrieve its ID + file_client_http_get_side_effect.append( + (data.get_bkend_fpg_resp, data.bkend_fpg) + ) + # Step #20: + # Wait for delete FPG task completion + mock_file_client.http.delete.return_value = \ + (data.fpg_delete_task_resp, data.fpg_delete_task_body) + file_client_get_task_side_effect.append( + data.fpg_delete_task_body + ) + mock_file_client.TASK_DONE = 1 + + # Step #21: + # Allow removal of default FPG from backend metadata + etcd_get_backend_metadata_side_effect.append( + data.etcd_bkend_mdata_with_default_fpg_and_ips + ) + + def check_response(self, resp): + pass diff --git a/test/createsnapshot_tester.py b/test/createsnapshot_tester.py index 12c690bf..88e22d23 100644 --- a/test/createsnapshot_tester.py +++ b/test/createsnapshot_tester.py @@ -23,12 +23,16 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] + volume = copy.deepcopy(data.volume) mock_etcd.get_vol_byname.side_effect = [ - data.volume, None, - copy.deepcopy(data.volume), + volume, + None, + volume, None ] + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.isOnlinePhysicalCopy.return_value = False def check_response(self, resp): self._test_case.assertEqual(resp, {u"Err": ''}) @@ -47,11 +51,15 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] + volume = copy.deepcopy(data.volume) mock_etcd.get_vol_byname.side_effect = [ - data.volume, None, - copy.deepcopy(data.volume) + volume, + None, + volume ] + mock_3parclient = 
self.mock_objects['mock_3parclient'] + mock_3parclient.isOnlinePhysicalCopy.return_value = False def check_response(self, resp): self._test_case.assertEqual(resp, {u"Err": ''}) @@ -69,7 +77,11 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = data.snap1 + mock_etcd.get_vol_byname.side_effect = [ + None, + data.snap1, + data.snap1 + ] def check_response(self, resp): self._test_case.assertEqual(resp, {u"Err": 'snapshot snapshot-1' @@ -105,7 +117,7 @@ def setup_mock_objects(self): ] def check_response(self, resp): - expected = 'source volume: %s does not exist' % \ + expected = 'Volume/Snapshot %s does not exist' % \ 'i_do_not_exist_volume' self._test_case.assertEqual(resp, {u"Err": expected}) @@ -118,12 +130,15 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] mock_etcd.get_vol_byname.side_effect = [ + None, data.volume, None, copy.deepcopy(data.volume) ] mock_etcd.save_vol.side_effect = \ [hpe_exc.HPEPluginSaveFailed(obj='snap-001')] + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.isOnlinePhysicalCopy.return_value = False def check_response(self, resp): expected = "ETCD data save failed: snap-001" @@ -136,5 +151,173 @@ def check_response(self, resp): # Rollback mock_3parclient.deleteVolume.assert_called() + +class TestCreateSnpSchedule(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * *", + "snapshotPrefix": "pqrst", + "expHrs": '4', + "retHrs": '2'}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.side_effect = [ + None, + data.volume, + None, + copy.deepcopy(data.volume) + ] + mock_3parclient = self.mock_objects['mock_3parclient'] + 
mock_3parclient.isOnlinePhysicalCopy.return_value = False + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + # Ensure that createSnapshot was called on 3PAR Client + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient._run.assert_called() + mock_3parclient.createSnapshot.assert_called() + + +class TestCreateSnpSchedNegFreq(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "snapshotPrefix": "pqrst", + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + opts = ['scheduleName', 'snapshotPrefix', 'scheduleFrequency'] + opts.sort() + expected = "Invalid input received: One or more mandatory options " \ + "%s are missing for operation create snapshot schedule" \ + % opts + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedNegPrefx(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * *", + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + opts = ['scheduleName', 'snapshotPrefix', 'scheduleFrequency'] + opts.sort() + expected = "Invalid input received: One or more mandatory options " \ + "%s are missing for operation create snapshot schedule" \ + % opts + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedInvPrefxLen(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * *", + "snapshotPrefix": "pqrstwdstyuijowkdlasihguf", + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + expected = 'Please provide a scheduleName with max 31 characters '\ + 'and snapshotPrefix with 
max length of 15 characters' + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedNoSchedName(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleFrequency": "10 * * * *", + "snapshotPrefix": "pqrst", + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + opts = ['scheduleName', 'snapshotPrefix', 'scheduleFrequency'] + opts.sort() + expected = "Invalid input received: One or more mandatory options " \ + "%s are missing for operation create snapshot schedule" \ + % opts + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedwithRetToBase(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * *", + "snapshotPrefix": "pqrst", + "retentionHours": '5', + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + invalid_opts = ['retentionHours'] + expected = "Invalid input received: Invalid option(s) %s " \ + "specified for operation create snapshot schedule. " \ + "Please check help for usage." 
% invalid_opts + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedRetExpNeg(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * *", + "snapshotPrefix": "pqrst", + "expHrs": '2', + "retHrs": '4'}} + + def check_response(self, resp): + expected = 'create schedule failed, error is: expiration hours '\ + 'must be greater than retention hours' + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnpSchedInvSchedFreq(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "scheduleName": '3parsched1', + "scheduleFrequency": "10 * * * * *", + "snapshotPrefix": "pqrst", + "expHrs": '4', + "retHrs": '2'}} + + def check_response(self, resp): + expected = 'Invalid schedule string is passed: HPE Docker Volume '\ + 'plugin Create volume failed: create schedule failed, '\ + 'error is: Improper string passed. ' + self._test_case.assertEqual(resp, {u"Err": expected}) + + +class TestCreateSnapshotInvalidOptions(CreateSnapshotUnitTest): + def get_request_params(self): + return {"Name": data.SNAPSHOT_NAME4, + "Opts": {"virtualCopyOf": data.VOLUME_NAME, + "mountConflictDelay": 22, + "backend": "dummy"}} + + def check_response(self, resp): + invalid_opts = ['backend'] + invalid_opts.sort() + expected = "Invalid input received: Invalid option(s) " \ + "%s specified for operation create snapshot. " \ + "Please check help for usage." 
% invalid_opts + self._test_case.assertEqual(resp, {u"Err": expected}) + # class TestCreateSnapshotUnauthorized(CreateSnapshotUnitTest): # pass diff --git a/test/createvolume_tester.py b/test/createvolume_tester.py index 240da70d..ce31ba08 100644 --- a/test/createvolume_tester.py +++ b/test/createvolume_tester.py @@ -62,6 +62,29 @@ def setup_mock_objects(self): mock_etcd.get_vol_byname.return_value = None mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getCPG.return_value = {'domain': 'some_domain'} + vol_3par_some_domain = { + 'name': 'dummy_3par_vol', + 'domain': 'some_domain', + 'copyType': 'base', + 'copyOf': '---', + 'sizeMiB': 2048, + 'provisioningType': 2, + 'compressionState': 1, + 'userCPG': 'some_user_cpg', + 'snapCPG': 'some_snap_cpg' + } + mock_3parclient.getVolume.return_value = vol_3par_some_domain + mock_3parclient.findVolumeSet.return_value = "some_vvset" + + some_vvset = { + 'name': 'dummy_vvset', + 'flashCachePolicy': 1, + } + mock_3parclient.getVolumeSet.return_value = some_vvset + + mock_3parclient.queryQoSRule.return_value = {'name': 'dummy_qos'} + mock_3parclient.getVLUN.side_effect = \ [exceptions.HTTPNotFound('fake')] @@ -80,10 +103,72 @@ def setup_mock_objects(self): mock_etcd.get_vol_byname.return_value = None +class TestImportAlreadyManagedVolume(CreateVolumeUnitTest): + def check_response(self, resp): + msg = 'target: %s is already in-use' % 'dcv-vvk_vol' + self._test_case.assertEqual(resp, {u"Err": msg}) + + def get_request_params(self): + return {"Name": "abc_vol", + "Opts": {"importVol": "dcv-vvk_vol"}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = None + + +class TestImportVolumeDifferentDomain(CreateVolumeUnitTest): + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ""}) + + def get_request_params(self): + return {"Name": "abc_vol", + "Opts": {"importVol": "dummy_3par_vol"}} + + def 
setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = None + + mock_3parclient = self.mock_objects['mock_3parclient'] + vol_3par_with_other_domain = { + 'name': 'dummy_3par_vol', + 'domain': 'other_than_some_domain', + 'copyType': 'base', + 'copyOf': '---', + 'sizeMiB': 2048, + 'provisioningType': 2, + 'compressionState': 1, + 'userCPG': 'some_user_cpg', + 'snapCPG': 'some_snap_cpg' + } + mock_3parclient.getVolume.return_value = vol_3par_with_other_domain + mock_3parclient.getVLUN.side_effect = [ + exceptions.HTTPNotFound("dummy_3par_vol") + ] + mock_3parclient.getCPG.return_value = {'domain': 'some_domain'} + + +class TestImportVolumeWithInvalidOptions(CreateVolumeUnitTest): + def check_response(self, resp): + in_valid_opts = ['expHrs', 'retHrs'] + in_valid_opts.sort() + expected = "Invalid input received: Invalid option(s) " \ + "%s specified for operation import volume. " \ + "Please check help for usage." % in_valid_opts + self._test_case.assertEqual(expected, resp['Err']) + + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {"importVol": "DummyVol", + "expHrs": 111, + "retHrs": 123}} + + class TestCreateVolumeInvalidName(CreateVolumeUnitTest): def check_response(self, resp): - self._test_case.assertEqual(resp, {u"Err": 'Invalid volume ' - 'name: test@vol@001 is passed.'}) + expected = {u'Err': 'Invalid input received: Invalid volume name: ' + 'test@vol@001 is passed.'} + self._test_case.assertEqual(expected, resp) def get_request_params(self): return {"Name": "test@vol@001", @@ -198,28 +283,6 @@ def setup_mock_objects(self): [exceptions.HTTPNotFound('fake')] -class TestCreateVolumeWithMutuallyExclusiveList(CreateVolumeUnitTest): - def check_response(self, resp): - self._test_case.assertEqual( - {"Err": "['virtualCopyOf', 'cloneOf', 'qos-name'," - " 'replicationGroup'] cannot be specified at the" - " same time"}, resp) - - def get_request_params(self): - return {"Name": 
"test-vol-001", - "Opts": {"qos-name": "soni_vvset", - "provisioning": "thin", - "size": "2", - "cloneOf": "clone_of"}} - - def setup_mock_objects(self): - mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.return_value = None - - mock_3parclient = self.mock_objects['mock_3parclient'] - mock_3parclient.getCPG.return_value = {} - - # FlashCache = True and qos-name= class TestCreateVolumeWithFlashCacheAndQOS(CreateVolumeUnitTest): def check_response(self, resp): @@ -364,8 +427,8 @@ class TestCreateCompressedVolumeNegativeSize(CreateVolumeUnitTest): def check_response(self, resp): expected_msg = 'Invalid input received: To create compression '\ 'enabled volume, size of the volume should be '\ - 'atleast 16GB. Fully provisioned volume can not be '\ - 'compressed. Please re enter requested volume size '\ + 'at least 16GB. Fully provisioned volume cannot be '\ + 'compressed. Please re-enter requested volume size '\ 'or provisioning type. ' self._test_case.assertEqual(resp, {u"Err": expected_msg}) @@ -516,6 +579,41 @@ def setup_mock_objects(self): ] +class TestCreateVolumeWithMutuallyExclusiveOptions(CreateVolumeUnitTest): + def check_response(self, resp): + mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol', + 'replicationGroup'] + mutually_exclusive_ops.sort() + expected_error_msg = "Invalid input received: Operations " \ + "%s are mutually exclusive and cannot be " \ + "specified together. Please check help for " \ + "usage." 
% mutually_exclusive_ops + self._test_case.assertEqual(expected_error_msg, resp['Err']) + + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {"virtualCopyOf": "my-vol", + "cloneOf": "my-vol", + "replicationGroup": "my-rcg"}} + + +class TestCreateVolumeWithInvalidOptions(CreateVolumeUnitTest): + def check_response(self, resp): + invalid_opts = ['expHrs', 'retHrs'] + invalid_opts.sort() + op = "create volume" + expected_error_msg = "Invalid input received: Invalid option(s) " \ + "%s specified for operation %s. " \ + "Please check help for usage." % \ + (invalid_opts, op) + self._test_case.assertEqual(expected_error_msg, resp['Err']) + + def get_request_params(self): + return {"Name": "test-vol-001", + "Opts": {"expHrs": 111, + "retHrs": 123}} + + # More cases of flash cache # 1. # if flash_cache: diff --git a/test/deleteshare_tester.py b/test/deleteshare_tester.py new file mode 100644 index 00000000..18004b66 --- /dev/null +++ b/test/deleteshare_tester.py @@ -0,0 +1,84 @@ +import time +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest +import copy + +from oslo_config import cfg +CONF = cfg.CONF + + +class DeleteShareUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def _get_plugin_api(self): + return 'volumedriver_remove' + + def override_configuration(self, all_configs): + pass + + +class TestDeleteShare(DeleteShareUnitTest): + + def __init__(self, test_obj): + self._test_obj = test_obj + + def get_request_params(self): + return self._test_obj.get_request_params() + + def setup_mock_objects(self): + self._test_obj.setup_mock_objects(self.mock_objects) + + def check_response(self, resp): + self._test_obj.check_response(resp, self.mock_objects, + self._test_case) + + # Nested class to handle regular volume + class Regular(object): + def __init__(self, params={}): + self._params = params + + def get_request_params(self): + share_name = 'MyDefShare_01' + return {"Name": share_name, + "Opts": {}} + 
+ def setup_mock_objects(self, mock_objects): + mock_share_etcd = mock_objects['mock_share_etcd'] + if 'share_with_acl' in self._params: + mock_share_etcd.get_share.return_value = copy.deepcopy( + data.etcd_share_with_acl) + else: + mock_share_etcd.get_share.return_value = copy.deepcopy( + data.etcd_share) + mock_file_client = mock_objects['mock_file_client'] + mock_file_client.http.get.side_effect = [ + # This file store is deleted as part of share delete + (data.get_fstore_resp, data.get_fstore_body), + # No more file store present on parent FPG + (data.get_fstore_resp, data.no_fstore_body), + # WSAPI for FPG delete requires ID of FPG for which + # FPG is being fetched by name + (data.get_bkend_fpg_resp, data.bkend_fpg) + ] + mock_fp_etcd = mock_objects['mock_fp_etcd'] + # ETCD having FPG metadata means the host owns the FPG + # Since last share on the FPG got deleted, FPG also needs + # to be deleted + mock_fp_etcd.get_fpg_metadata.return_value = \ + data.etcd_bkend_mdata_with_default_fpg + + mock_file_client.http.delete.return_value = \ + (data.fpg_delete_task_resp, data.fpg_delete_task_body) + + mock_file_client.getTask.return_value = data.fpg_delete_task_body + mock_file_client.TASK_DONE = 1 + + def check_response(self, resp, mock_objects, test_case): + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + time.sleep(3) + + # mock_3parclient.deleteVolume.assert_called() + # + # mock_etcd = mock_objects['mock_etcd'] + # mock_etcd.delete_vol.assert_called() diff --git a/test/enableplugin_tester.py b/test/enableplugin_tester.py new file mode 100644 index 00000000..59a8c84d --- /dev/null +++ b/test/enableplugin_tester.py @@ -0,0 +1,36 @@ +from hpe3parclient import exceptions +import test.hpe_docker_unit_test as hpeunittest +from oslo_config import cfg +CONF = cfg.CONF + + +class EnablePluginUnitTest(hpeunittest.HpeDockerUnitTestExecutor): + 
def _get_plugin_api(self): + return 'plugin_activate' + + def check_response(self, resp): + expected_resp = {u"Implements": [u"VolumeDriver"]} + self._test_case.assertEqual(resp, expected_resp) + + +class TestEnablePlugin(EnablePluginUnitTest): + pass + + +class InitializePluginUnitTest(hpeunittest.HpeDockerUnitTestExecutor): + def _get_plugin_api(self): + return "" + + +class TestPluginInitializationFails(InitializePluginUnitTest): + def setup_mock_objects(self): + mock_3parclient = self.mock_objects['mock_3parclient'] + + # Add as many side_effect as the number of backends + side_effect = [] + for backend in self._all_configs: + side_effect.append(exceptions.UnsupportedVersion) + mock_3parclient.getWsApiVersion.side_effect = side_effect + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": 'GOT RESPONSE'}) diff --git a/test/fake_3par_data.py b/test/fake_3par_data.py index 6c9052f5..360a8d94 100644 --- a/test/fake_3par_data.py +++ b/test/fake_3par_data.py @@ -1,8 +1,11 @@ +import copy import json import mock +from oslo_utils import netutils THIS_NODE_ID = "This-Node-Id" OTHER_NODE_ID = "Other-Node-Id" +FAKE_MOUNT_ID = 'Fake-Mount-ID' KNOWN_HOSTS_FILE = 'dummy' HPE3PAR_CPG = 'DockerCPG' HPE3PAR_CPG2 = 'fakepool' @@ -38,7 +41,10 @@ SNAPSHOT_NAME2 = 'snapshot-2' SNAPSHOT_ID3 = 'f5d9e226-2995-4d66-a5bd-3e373f4ff772' SNAPSHOT_NAME3 = 'snapshot-3' +SNAPSHOT_ID4 = 'f5d9e226-2995-4d66-a5bd-3e373f4ff774' +SNAPSHOT_NAME4 = 'snapshot-4' VOLUME_3PAR_NAME = 'dcv-0DM4qZEVSKON-DXN-NwVpw' +SNAPSHOT_3PAR_NAME1 = 'dcs-0DM4qZEVSKON-DXN-NwVpw' SNAPSHOT_3PAR_NAME = 'dcs-L4I73ONuTci9Fd4ceij-MQ' TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d' TARGET_LUN = 90 @@ -53,6 +59,7 @@ RCG_STARTED = 3 RCG_STOPPED = 5 ROLE_PRIMARY = 1 +ROLE_PRIMARY_REV = 1 ROLE_SECONDARY = 2 FAKE_DESC = 'test description name' @@ -108,6 +115,69 @@ 'backend': 'DEFAULT' } +replicated_volume = { + 'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': VOL_DISP_NAME, + 'size': 2, 
+ 'host': FAKE_DOCKER_HOST, + 'provisioning': THIN, + 'flash_cache': None, + 'qos_name': None, + 'compression': None, + 'fsMode': None, + 'fsOwner': None, + 'snapshots': [], + 'mount_conflict_delay': MOUNT_CONFLICT_DELAY, + 'is_snap': False, + 'cpg': HPE3PAR_CPG, + 'snap_cpg': HPE3PAR_CPG2, + 'backend': 'DEFAULT', + 'rcg_info': {'local_rcg_name': RCG_NAME, + 'remote_rcg_name': REMOTE_RCG_NAME} +} + +pp_rcg_policies = {'autoRecover': False, + 'overPeriodAlert': False, + 'autoFailover': False, + 'pathManagement': False} +normal_rcg = { + 'primary_3par_rcg': { + 'name': RCG_NAME, + 'role': ROLE_PRIMARY, + 'targets': [{'roleReversed': False, + 'policies': pp_rcg_policies + }], + }, + 'secondary_3par_rcg': { + 'role': ROLE_SECONDARY, + 'targets': [{'roleReversed': False}] + } +} + +failover_rcg = { + 'primary_3par_rcg': { + 'role': ROLE_PRIMARY, + 'targets': [{'roleReversed': False}] + }, + 'secondary_3par_rcg': { + 'role': ROLE_PRIMARY_REV, + 'targets': [{'roleReversed': True}] + } +} + +recover_rcg = { + 'primary_3par_rcg': { + 'role': ROLE_SECONDARY, + 'targets': [{'roleReversed': True}] + }, + 'secondary_3par_rcg': { + 'role': ROLE_PRIMARY, + 'targets': [{'roleReversed': True}] + } +} + + json_path_info = \ '{"connection_info": {"driver_volume_type": "iscsi", ' \ '"data": {"target_luns": [3, 3], "target_iqns": ' \ @@ -124,7 +194,22 @@ '/hpe/data/hpedocker-dm-uuid-mpath-360002ac00000000001008f99000' \ '19d52"}' -path_info = json.loads(json_path_info) +# Volumes list for list-volumes operation +vols_list = [ + { + 'display_name': 'test-vol-001', + 'size': 310, + 'path_info': copy.deepcopy(json_path_info) + }, + { + 'display_name': 'test-vol-002', + 'size': 555, + 'path_info': copy.deepcopy(json_path_info) + } +] + + +path_info = json.loads(copy.deepcopy(json_path_info)) vol_mounted_on_this_node = { 'name': VOLUME_NAME, @@ -140,7 +225,7 @@ 'fsMode': None, 'snapshots': [], 'node_mount_info': {THIS_NODE_ID: ['Fake-Mount-ID']}, - 'path_info': json_path_info, + 
'path_info': copy.deepcopy(json_path_info), 'mount_conflict_delay': MOUNT_CONFLICT_DELAY, 'is_snap': False, 'backend': 'DEFAULT' @@ -161,11 +246,13 @@ 'snapshots': [], 'node_mount_info': {OTHER_NODE_ID: ['Fake-Mount-ID']}, 'path_info': path_info, + 'old_path_info': [(THIS_NODE_ID, copy.deepcopy(json_path_info))], 'mount_conflict_delay': MOUNT_CONFLICT_DELAY, 'is_snap': False, 'backend': 'DEFAULT' } + volume_mounted_twice_on_this_node = { 'name': VOLUME_NAME, 'id': VOLUME_ID, @@ -261,6 +348,40 @@ 'mount_conflict_delay': MOUNT_CONFLICT_DELAY, } +snap4_schedule = { + 'schedule_name': "3parsched1", + 'snap_name_prefix': "pqrst", + 'sched_frequency': "10 * * * *", + 'sched_snap_exp_hrs': 4, + 'sched_snap_ret_hrs': 2 +} +snap4_metadata = { + 'name': SNAPSHOT_NAME4, + 'id': SNAPSHOT_ID4, + 'parent_name': SNAPSHOT_NAME1, + 'parent_id': SNAPSHOT_ID1, + 'expiration_hours': None, + 'retention_hours': None, + 'fsOwner': None, + 'fsMode': None, + 'snap_schedule': snap4_schedule, +} +snap4 = { + 'name': SNAPSHOT_NAME4, + 'id': SNAPSHOT_ID4, + 'display_name': SNAPSHOT_NAME4, + # This is a child of ref_to_snap1 + 'parent_id': VOLUME_ID, + 'ParentName': VOLUME_NAME, + 'is_snap': True, + 'has_schedule': True, + 'size': 2, + 'snap_metadata': snap4_metadata, + 'snapshots': [], + 'mount_conflict_delay': MOUNT_CONFLICT_DELAY, + 'backend': 'DEFAULT' +} + ref_to_snap1 = { 'name': SNAPSHOT_NAME1, 'id': SNAPSHOT_ID1, @@ -283,6 +404,14 @@ 'ParentName': VOLUME_NAME } +ref_to_snap4 = { + 'name': SNAPSHOT_NAME4, + 'id': SNAPSHOT_ID4, + 'parent_id': VOLUME_ID, + 'ParentName': VOLUME_NAME, + 'snap_schedule': snap4_schedule +} + bkend_snapshots = [SNAPSHOT_3PAR_NAME] # this is the qos we get from wsapi @@ -312,6 +441,22 @@ 'backend': 'DEFAULT' } +volume_with_snap_schedule = { + 'name': VOLUME_NAME, + 'id': VOLUME_ID, + 'display_name': VOL_DISP_NAME, + 'size': 2, + 'host': FAKE_DOCKER_HOST, + 'provisioning': THIN, + 'flash_cache': None, + 'compression': None, + 'snapshots': [ref_to_snap4], + 
'mount_conflict_delay': MOUNT_CONFLICT_DELAY, + 'is_snap': False, + 'has_schedule': False, + 'backend': 'DEFAULT' +} + volume_with_multilevel_snapshot = { 'name': VOLUME_NAME, 'id': VOLUME_ID, @@ -724,3 +869,518 @@ standard_logout = [ mock.call.logout()] + +create_share_args = { + 'id': '1422125830661572115', + 'backend': 'DEFAULT_FILE', + 'cpg': 'swap_fs_cpg', + 'fpg': 'DockerFpg_2', + 'name': 'GoodShare', + 'size': 1048576, + 'readonly': False, + 'nfsOptions': None, + 'protocol': 'nfs', + 'comment': None, + 'fsMode': None, + 'fsOwner': None, + 'status': 'AVAILABLE', + 'vfsIPs': [['192.168.98.41', '255.255.192.0']], +} + +etcd_share = { + 'id': '1422125830661572115', + 'backend': 'DEFAULT_FILE', + 'cpg': 'swap_fs_cpg', + 'fpg': 'DockerFpg_2', + 'vfs': 'DockerVfs_2', + 'name': 'GoodShare', + 'size': 1048576, + 'readonly': False, + 'nfsOptions': None, + 'protocol': 'nfs', + 'clientIPs': [], + 'comment': None, + 'fsMode': None, + 'fsOwner': None, + 'status': 'AVAILABLE', + 'vfsIPs': [['192.168.98.41', '255.255.192.0']], + 'quota_id': '13209547719864709510' +} + +etcd_share_with_acl = { + 'id': '1422125830661572115', + 'backend': 'DEFAULT_FILE', + 'cpg': 'swap_fs_cpg', + 'fpg': 'DockerFpg_2', + 'vfs': 'DockerVfs_2', + 'name': 'GoodShare', + 'size': 1048576, + 'readonly': False, + 'nfsOptions': None, + 'protocol': 'nfs', + 'clientIPs': [], + 'comment': None, + 'fsMode': 'A:fd:rwax,A:fdg:rwax,A:fdS:DtnNcy', + 'fsOwner': '1000:1000', + 'status': 'AVAILABLE', + 'vfsIPs': [['192.168.98.41', '255.255.192.0']], + 'quota_id': '13209547719864709510' +} + +etcd_bkend_mdata_with_default_fpg = { + 'ips_in_use': [], + 'ips_locked_for_use': [], + 'counter': 1, + 'default_fpgs': {'fs_cpg': ['DockerFpg_0']} +} + +etcd_bkend_mdata_with_default_fpg_and_ips = { + 'ips_in_use': ['192.168.98.41'], + 'ips_locked_for_use': [], + 'counter': 1, + 'default_fpgs': {'fs_cpg': ['DockerFpg_0']} +} + +etcd_fpg_metadata = { + "fpg": "DockerFpg_1", + "fpg_size": 16, + "vfs": "DockerVfs_1", + "ips": { 
+ "255.255.192.0": ["192.168.98.41"] + } +} + +get_bkend_fpg_resp = { + 'status': '200' +} + +bkend_fpg = { + 'members': [ + { + 'id': '5233be44-292c-43f2-a9b8-373479d785a3', 'overAllState': 1, + 'totalCapacityGiB': 10240.0, + 'comment': 'Docker created FPG', + 'cpg': 'fs_cpg', + 'name': 'Imran_fpg', + 'usedCapacityGiB': 5.35, + 'availCapacityGiB': 10234.65, + } + ], + 'total': 1 +} + +quotas_for_fpg = { + 'members': [ + { + 'currentBlockMiB': 0, + 'hardFileLimit': 0, + 'softBlockMiB': 1048576, + 'hardBlockMiB': 1048576, + 'currentFileLimit': 2, + 'id': '10098013665158623372', + 'fpg': 'DockerFpg_0', + 'graceBlockInSec': 0, + 'softFileLimit': 0, + 'overallState': 1, + 'graceFileLimitInSec': 0, + 'key': 3, + 'type': 3, + 'name': 'MyShare_101', + 'vfs': 'DockerVfs_0' + }, + { + 'currentBlockMiB': 0, + 'hardFileLimit': 0, + 'softBlockMiB': 13631488, + 'hardBlockMiB': 13631488, + 'currentFileLimit': 2, + 'id': '10211052782065922663', + 'fpg': 'DockerFpg_0', + 'graceBlockInSec': 0, + 'softFileLimit': 0, + 'overallState': 1, + 'graceFileLimitInSec': 0, + 'key': 4, + 'type': 3, + 'name': 'MyShare_102', + 'vfs': 'DockerVfs_0' + } + ], + 'total': 2 +} + +bkend_vfs = { + 'members': [ + { + 'comment': 'Docker created VFS', + 'id': '5233be44-292c-43f2-a9b8-373479d785a3-2', + 'name': 'Imran_fpg_vfs', + 'overallState': 1, + 'IPInfo': [ + { + 'fpg': 'Imran_fpg', + 'vlanTag': 0, + 'vfs': 'Imran_fpg_vfs', + 'IPAddr': '192.168.98.5', + 'networkName': 'user', + 'netmask': '255.255.192.0' + } + ], + 'fpg': 'Imran_fpg', + 'blockGraceTimeSec': 604800, + 'snapshotQuotaEnabled': False + } + ], + 'total': 1 +} + +fpg_create_resp = { + 'status': '202' +} + +fpg_create_body = { + "taskId": 5565 +} + +fpg_create_task_resp = { + 'status': '200' +} + +fpg_create_task_body = { + "id": 5565, + "type": 20, + "name": "createfpg_task", + "status": 1, + "completedPhases": 1, + "totalPhases": 1, + "completedSteps": 0, + "totalsteps": 1, + "startTime": "2019-05-20 16:22:58 IST", + "finishTime": "-", + 
"user": "3paradm", + "detailedStatus": "2019-05-20 16:22:58 IST Created task.\n" + "2019-05-20 16:22:58 IST Updated Executing " + "\"createfpg_task\" as 0:63364\n2019-05-20 16:22:58 " + "IST Updated Size: 16t\n2019-05-20 16:22:58 IST " + "Updated FPG Name: DockerFpg_1\n" + "2019-05-20 16:22:58 IST Updated CPG Name: fs_cpg\n" + "2019-05-20 16:22:59 IST Updated Automatically " + "assigned nodeid: 1\n2019-05-20 16:22:59 IST Updated" + " createfpg_vvs: DockerFpg_1 16t 5565\n2019-05-20 " + "16:22:59 IST Updated Creating VV: DockerFpg_1.1 " + "16t in fs_cpg\n2019-05-20 16:23:00 IST Updated vv " + "DockerFpg_1.1 attached to node 0 File Services\n" + "2019-05-20 16:23:00 IST Updated vv DockerFpg_1.1 " + "attached to node 1 File Services\n" +} + +sh_create_resp = { + 'status': '201' +} + +sh_create_body = { + "links": [ + { + "href": "https://192.168.67.6:8080/api/v1/fileshares/" + "14818594021406325994" + } + ] +} + +set_quota_resp = { + 'status': '201' +} + +resp = { + 'status': '200' +} + +get_quotas_for_fpg = { + "members": [ + { + "softBlockMiB": 1048576, + "hardBlockMiB": 1048576, + "id": "10098013665158623372", + "fpg": "DockerFpg_0", + "overallState": 1, + "key": 3, + "type": 3, + "name": "MyShare_101", + "vfs": "DockerVfs_0" + }, + { + "softBlockMiB": 1048576, + "hardBlockMiB": 1048576, + "id": "10211052782065922663", + "fpg": "DockerFpg_0", + "overallState": 1, + "key": 4, + "type": 3, + "name": "MyShare_102", + "vfs": "DockerVfs_0" + } + ], + "total": 2 +} +set_quota_body = { + "links": [ + { + "href": "https://192.168.67.6:8080/api/v1/filepersonaquotas/" + "17562742969854637283", + } + ] +} + +all_vfs_resp = { + 'status': '200' +} + +all_vfs_body = { + 'members': [ + { + 'IPInfo': [ + { + 'networkName': 'user', + 'vlanTag': 0, + 'fpg': 'DockerFpg_19', + 'IPAddr': '192.168.70.27', + 'netmask': '255.255.192.0', + 'vfs': 'DockerVfs_19' + } + ], + 'comment': 'Docker created VFS', + 'fpg': 'DockerFpg_19', + 'id': '5000031e-c00b-445d-8cc2-d1369fa1ac6d-2', + 'name': 
'DockerVfs_19', + 'overallState': 1, + }, + { + 'IPInfo': [ + { + 'networkName': 'user', + 'vlanTag': 0, + 'fpg': 'DockerFpg_1', + 'IPAddr': '192.168.98.41', + 'netmask': '255.255.192.0', + 'vfs': 'DockerVfs_1' + } + ], + 'comment': 'Docker created VFS', + 'fpg': 'DockerFpg_1', + 'id': '43baa30e-3e57-40d4-b8a3-b9a94ce2de78-2', + 'name': 'DockerVfs_1', + 'overallState': 1, + }, + { + 'IPInfo': [ + { + 'networkName': 'user', + 'vlanTag': 0, + 'fpg': 'swap_fpg2', + 'IPAddr': '192.168.110.7', + 'netmask': '255.255.192.0', + 'vfs': 'swap_fpg2_vfs' + } + ], + 'comment': 'Docker created VFS', + 'fpg': 'swap_fpg2', + 'id': '00d76323-6ac6-4b0f-b4cc-8fe79d9f2df2-2', + 'name': 'swap_fpg2_vfs', + 'overallState': 1, + }, + { + 'IPInfo': [ + { + 'networkName': 'user', + 'vlanTag': 0, + 'fpg': 'ImranFpg', + 'IPAddr': '192.168.98.42', + 'netmask': '255.255.192.0', + 'vfs': 'ImranFpg_vfs' + } + ], + 'comment': 'Docker created VFS', + 'fpg': 'ImranFpg', + 'id': 'e29c7282-7d12-4973-976e-cd02163f6c9e-2', + 'name': 'ImranFpg_vfs', + 'overallState': 1, + }, + { + 'IPInfo': [ + { + 'networkName': 'user', + 'vlanTag': 0, + 'fpg': 'DockerFpg_0', + 'IPAddr': '192.168.110.5', + 'netmask': '255.255.192.0', + 'vfs': 'DockerVfs_0' + } + ], + 'comment': 'Docker created VFS', + 'fpg': 'DockerFpg_0', + 'id': 'cea9120c-80e2-4f2a-ae91-7166e50046c0-2', + 'name': 'DockerVfs_0', + 'overallState': 1, + } + ], + 'total': 5 +} + +vfs_create_resp = { + 'status': '202' +} + +vfs_create_body = { + "taskId": 5566, +} + +vfs_create_task_resp = { + 'status': '200' +} + +vfs_create_task_body = { + "id": 5566, + "type": 20, + "name": "createvfs_task", + "status": 1, + "startTime": "2019-05-20 16:24:20 IST", + "finishTime": "2019-05-20 16:24:50 IST", + "user": "3paradm", + "detailedStatus": "2019-05-20 16:24:20 IST Created task.\n" + "2019-05-20 16:24:20 IST Updated Executing " + "\"createvfs_task\" as 0:2428\n2019-05-20 16:24:21 " + "IST Updated Generating self signed certificate.\n" + "2019-05-20 16:24:21 IST 
Updated Creating VFS " + "\"DockerVfs_1\" in FPG DockerFpg_1.\n2019-05-20 " + "16:24:29 IST Updated Applying certificate data.\n" + "2019-05-20 16:24:39 IST Updated Associating IP " + "192.168.98.11 with VFS \"DockerVfs_1\".\n2019-05-20 " + "16:24:50 IST Updated Associated IP 192.168.98.11 " + "with VFS \"DockerVfs_1\".\n2019-05-20 16:24:50 IST " + "Updated Setting snap quota accounting switch " + "value\n2019-05-20 16:24:50 IST Updated Value for " + "Snap quota accounting switch is set to: disable.\n" + "2019-05-20 16:24:50 IST Updated Created VFS " + "\"DockerVfs_1\" on FPG DockerFpg_1.\n2019-05-20 " + "16:24:50 IST Completed scheduled task." +} + +get_vfs_resp = { + "status": "200", +} + +get_vfs_body = { + "members": [ + { + "comment": "Docker created VFS", + "id": "5233be44-292c-43f2-a9b8-373479d785a3-2", + "name": "Imran_fpg_vfs", + "overallState": 1, + "IPInfo": [ + { + "fpg": "Imran_fpg", + "vlanTag": 0, + "vfs": "Imran_fpg_vfs", + "IPAddr": "192.168.98.5", + "networkName": "user", + "netmask": "255.255.192.0" + } + ], + "fpg": "Imran_fpg" + } + ], + "total": 1 +} + +get_fstore_resp = { + "status": "200", +} + +get_fstore_body = { + "total": 1, + "members": [ + { + "fpg": "DockerFpg_1", + "overallState": 1, + "securityMode": 2, + "id": "b1a085a1-4834-49fc-b9cd-37b7e3fcf55d-2", + "name": "GoodShare", + "vfs": "DockerVfs_1" + } + ] +} + +no_fpg_resp = { + "status": "200", +} + +no_fpg_body = { + "total": 0, + "members": [] +} + +no_fstore_body = { + "total": 0, + "members": [] +} + +fpg_delete_task_resp = { + 'status': '202' +} + +fpg_delete_task_body = { + "id": 5565, + "type": 20, + "name": "deletefpg_task", + "status": 1, + "taskId": 1234 +} + +etcd_mounted_share = { + 'id': '1422125830661572115', + 'backend': 'DEFAULT_FILE', + 'cpg': 'swap_fs_cpg', + 'fpg': 'DockerFpg_2', + 'vfs': 'DockerVfs_2', + 'name': 'GoodShare', + 'size': 1048576, + 'readonly': False, + 'nfsOptions': None, + 'protocol': 'nfs', + 'clientIPs': [netutils.get_my_ipv4()], + 'comment': 
None, + 'fsMode': None, + 'fsOwner': None, + 'status': 'AVAILABLE', + 'vfsIPs': [['192.168.98.41', '255.255.192.0']], + 'quota_id': '13209547719864709510', + 'path_info': {THIS_NODE_ID: [FAKE_MOUNT_ID]} +} + +show_fs_user_resp = [ + 'Username,UID,---------------------SID----------------------,' + 'Primary_Group,Enabled', + 'Administrator,10500,S-1-5-21-3407317619-3829948340-1570492076-' + '500,Local Users,false', + 'Guest,10501,S-1-5-21-3407317619-3829948340-1570492076-501,' + 'Local Users,false', + 'abc,1000,S-1-5-21-3407317619-3829948340-1570492076-5009,' + 'Local Users,true', + 'xyz,1005,S-1-5-21-3407317619-3829948340-1570492076-5011,' + 'Local Users,true', + '--------------------------------------------------------------' + '--------------------------', + '4,total,,,' +] + +show_fs_group_resp = [ + 'GroupName,GID,---------------------SID----------------------', + 'Local Users,10800,S-1-5-21-3407317619-3829948340-1570492076-800', + 'Administrators,10544,S-1-5-32-544', + 'Users,10545,S-1-5-32-545', + 'Guests,10546,S-1-5-32-546', + 'Backup Operators,10551,S-1-5-32-551', + 'docker,1000,S-1-5-21-3407317619-3829948340-1570492076-5010', + '---------------------------------------------------------------------', + '6,total,' +] diff --git a/test/getvolume_tester.py b/test/getvolume_tester.py index bdf79359..0a2480f7 100644 --- a/test/getvolume_tester.py +++ b/test/getvolume_tester.py @@ -1,5 +1,7 @@ import copy +from hpe3parclient import exceptions + import test.fake_3par_data as data import test.hpe_docker_unit_test as hpedockerunittest from oslo_config import cfg @@ -18,7 +20,7 @@ def override_configuration(self, all_configs): pass -class TestQosVolume(GetVolumeUnitTest): +class TestGetVolumeWithQos(GetVolumeUnitTest): def get_request_params(self): return {"Name": data.VOLUME_NAME, "Opts": {"provisioning": "thin", @@ -28,7 +30,7 @@ def get_request_params(self): def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] - 
mock_etcd.get_vol_byname.return_value = data.volume_qos + mock_etcd.get_vol_byname.return_value = copy.deepcopy(data.volume_qos) mock_etcd.get_vol_path_info.return_value = None mock_3parclient = self.mock_objects['mock_3parclient'] @@ -50,6 +52,66 @@ def check_response(self, resp): u'vvset_name': u'vvk_vvset' }, u'volume_detail': { + u'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'3par_vol_name': data.VOLUME_3PAR_NAME, + u'backend': 'DEFAULT', + u'compression': None, + u'flash_cache': 'false', + u'fsMode': None, + u'fsOwner': None, + u'provisioning': u'thin', + u'size': 2, + u'mountConflictDelay': data.MOUNT_CONFLICT_DELAY, + u'cpg': data.HPE3PAR_CPG, + u'snap_cpg': data.HPE3PAR_CPG2, + u'domain': None + } + }, + u'Name': u'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'Mountpoint': u'' + }, + u'Err': u'' + } + + self._test_case.assertEqual(resp, expected) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.queryQoSRule.assert_called() + + +class TestGetVolumeWithGetQoSFails(GetVolumeUnitTest): + def get_request_params(self): + return {"Name": data.VOLUME_NAME, + "Opts": {"provisioning": "thin", + "qos-name": "vvk_vvset", + "size": "2", + "backend": "DEFAULT"}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = copy.deepcopy(data.volume_qos) + mock_etcd.get_vol_path_info.return_value = None + + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.queryQoSRule.side_effect = [ + exceptions.HTTPNotFound("QoS vvk_vvset not found") + ] + mock_3parclient.findVolumeSet.return_value = None + + def check_response(self, resp): + expected = { + u'Volume': { + u'Devicename': u'', + u'Status': { + u'qos_detail': "ERROR: Failed to retrieve QoS " + "'vvk_vvset' from 3PAR", + u'volume_detail': { + u'id': 
'd03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'3par_vol_name': data.VOLUME_3PAR_NAME, + u'backend': 'DEFAULT', u'compression': None, u'flash_cache': None, u'fsMode': None, @@ -58,7 +120,8 @@ def check_response(self, resp): u'size': 2, u'mountConflictDelay': data.MOUNT_CONFLICT_DELAY, u'cpg': data.HPE3PAR_CPG, - u'snap_cpg': data.HPE3PAR_CPG2 + u'snap_cpg': data.HPE3PAR_CPG2, + u'domain': None } }, u'Name': u'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7', @@ -76,6 +139,129 @@ def check_response(self, resp): mock_3parclient.queryQoSRule.assert_called() +class TestGetRcgVolume(GetVolumeUnitTest): + def get_request_params(self): + return {"Name": data.VOLUME_NAME, + "Opts": {"provisioning": "thin", + "replicationGroup": data.RCG_NAME, + "size": "2", + "backend": "3par_pp_rep"}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + self.rep_vol = copy.deepcopy(data.replicated_volume) + self.rep_vol['backend'] = '3par_pp_rep' + mock_etcd.get_vol_byname.return_value = self.rep_vol + mock_etcd.get_vol_path_info.return_value = None + + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getRemoteCopyGroup.return_value = \ + data.normal_rcg['primary_3par_rcg'] + + mock_3parclient.findVolumeSet.return_value = None + + def check_response(self, resp): + expected = { + u'Volume': { + u'Devicename': u'', + u'Status': { + u'rcg_detail': {'rcg_name': data.RCG_NAME, + 'policies': data.pp_rcg_policies, + 'role': 'Primary'}, + u'volume_detail': { + u'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'3par_vol_name': data.VOLUME_3PAR_NAME, + u'backend': '3par_pp_rep', + u'compression': None, + u'flash_cache': None, + u'fsMode': None, + u'fsOwner': None, + u'provisioning': u'thin', + u'size': 2, + u'mountConflictDelay': data.MOUNT_CONFLICT_DELAY, + u'cpg': data.HPE3PAR_CPG, + u'snap_cpg': data.HPE3PAR_CPG2, + u'secondary_cpg': 'FC_r1', + u'secondary_snap_cpg': 'FC_r5', + u'domain': None + } + }, + u'Name': 
u'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'Mountpoint': u'' + }, + u'Err': u'' + } + + self._test_case.assertEqual(resp, expected) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.getRemoteCopyGroup.assert_called() + + +class TestGetRcgVolumeFails(GetVolumeUnitTest): + def get_request_params(self): + return {"Name": data.VOLUME_NAME, + "Opts": {"provisioning": "thin", + "replicationGroup": data.RCG_NAME, + "size": "2", + "backend": "3par_pp_rep"}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + self.rep_vol = copy.deepcopy(data.replicated_volume) + self.rep_vol['backend'] = '3par_pp_rep' + mock_etcd.get_vol_byname.return_value = self.rep_vol + mock_etcd.get_vol_path_info.return_value = None + + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getRemoteCopyGroup.side_effect = [ + exceptions.HTTPNotFound("RCG %s not found" % data.RCG_NAME) + ] + mock_3parclient.findVolumeSet.return_value = None + + def check_response(self, resp): + expected = { + u'Volume': { + u'Devicename': u'', + u'Status': { + u'rcg_detail': "ERROR: Failed to retrieve RCG '%s' " + "from 3PAR" % data.RCG_NAME, + u'volume_detail': { + u'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'3par_vol_name': data.VOLUME_3PAR_NAME, + u'backend': '3par_pp_rep', + u'compression': None, + u'flash_cache': None, + u'fsMode': None, + u'fsOwner': None, + u'provisioning': u'thin', + u'size': 2, + u'mountConflictDelay': data.MOUNT_CONFLICT_DELAY, + u'cpg': data.HPE3PAR_CPG, + u'snap_cpg': data.HPE3PAR_CPG2, + u'secondary_cpg': 'FC_r1', + u'secondary_snap_cpg': 'FC_r5', + u'domain': None + } + }, + u'Name': u'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'Mountpoint': u'' + }, + u'Err': u'' + } + + self._test_case.assertEqual(resp, expected) + + # Check if these functions were actually invoked + # in 
the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.getRemoteCopyGroup.assert_called() + + class TestCloneVolume(GetVolumeUnitTest): def get_request_params(self): return {"Name": data.VOLUME_NAME, @@ -87,6 +273,8 @@ def setup_mock_objects(self): mock_etcd = self.mock_objects['mock_etcd'] mock_etcd.get_vol_byname.return_value = data.volume_dedup mock_etcd.get_vol_path_info.return_value = None + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.findVolumeSet.return_value = None def check_response(self, resp): expected = { @@ -94,6 +282,9 @@ def check_response(self, resp): u'Devicename': u'', u'Status': { u'volume_detail': { + u'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7', + u'3par_vol_name': data.VOLUME_3PAR_NAME, + u'backend': 'DEFAULT', u'compression': None, u'flash_cache': None, u'provisioning': u'dedup', @@ -102,7 +293,8 @@ def check_response(self, resp): u'fsOwner': None, u'mountConflictDelay': data.MOUNT_CONFLICT_DELAY, u'cpg': data.HPE3PAR_CPG, - u'snap_cpg': data.HPE3PAR_CPG + u'snap_cpg': data.HPE3PAR_CPG, + u'domain': None } }, u'Name': u'volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7', @@ -151,6 +343,9 @@ def setup_mock_objects(self): def check_response(self, resp): snap_detail = { + u'id': '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31', + u'3par_vol_name': data.SNAPSHOT_3PAR_NAME, + u'backend': 'DEFAULT', u'compression': None, u'is_snap': True, u'parent_id': data.VOLUME_ID, diff --git a/test/hpe_docker_unit_test.py b/test/hpe_docker_unit_test.py index afc4eb49..871084f5 100644 --- a/test/hpe_docker_unit_test.py +++ b/test/hpe_docker_unit_test.py @@ -1,10 +1,12 @@ import abc import json import six +import time from io import StringIO from twisted.internet import reactor + from config import setupcfg from hpedockerplugin import exception from hpedockerplugin import hpe_storage_api as api @@ -30,7 +32,6 @@ class HpeDockerUnitTestExecutor(object): def 
__init__(self, **kwargs): self._kwargs = kwargs - self._host_config = None self._all_configs = None @staticmethod @@ -53,7 +54,7 @@ def _real_execute_api(self, plugin_api): # Get API parameters from child class req_body = self._get_request_body(self.get_request_params()) - _api = api.VolumePlugin(reactor, self._host_config, self._all_configs) + _api = api.VolumePlugin(reactor, self._all_configs) try: resp = getattr(_api, plugin_api)(req_body) resp = json.loads(resp) @@ -93,13 +94,47 @@ def _mock_execute_api(self, mock_objects, plugin_api=''): # Get API parameters from child class req_body = self._get_request_body(self.get_request_params()) - _api = api.VolumePlugin(reactor, self._host_config, self._all_configs) + _api = api.VolumePlugin(reactor, self._all_configs) + + if _api.orchestrator: + _api.orchestrator._execute_request = \ + _api.orchestrator.__undeferred_execute_request__ + + if _api._file_orchestrator: + _api._file_orchestrator._execute_request = \ + _api._file_orchestrator.__undeferred_execute_request__ + + req_params = self.get_request_params() + + # Workaround to allow all the async-initializing threads to + # complete the initialization. 
We cannot use thread.join() + # in the plugin code as that would defeat the purpose of async + # initialization by making the main thread wait for all the + # child threads to complete initialization + time.sleep(3) + + # There are few TCs like enable/disable plugin for which + # there isn't going to be any request parameters + # Such TCs need to skip the below block and continue + if req_params: + backend = req_params.get('backend', 'DEFAULT') + + while True: + backend_state = _api.is_backend_initialized(backend) + print(" ||| Backend %s, backend_state %s " % (backend, + backend_state)) + if backend_state == 'OK' or backend_state == 'FAILED': + break + time.sleep(1) + try: - resp = getattr(_api, plugin_api)(req_body) - resp = json.loads(resp) + # Plugin initialization UTs will return empty plugin_api string + if plugin_api: + resp = getattr(_api, plugin_api)(req_body) + resp = json.loads(resp) - # Allow child class to validate response - self.check_response(resp) + # Allow child class to validate response + self.check_response(resp) except Exception as ex: # self.handle_exception(ex) # Plugin will never throw exception. 
This exception is coming @@ -113,7 +148,7 @@ def run_test(self, test_case): # This is important to set as it is used by the mock decorator to # take decision which driver to instantiate self._protocol = test_case.protocol - self._host_config, self._all_configs = self._get_configuration() + self._all_configs = self._get_configuration() if not self.use_real_flow(): self._mock_execute_api(plugin_api=self._get_plugin_api()) @@ -125,22 +160,56 @@ def use_real_flow(self): return False def _get_configuration(self): - cfg_file_name = './test/config/hpe_%s.conf' % self._protocol.lower() + if self.use_real_flow(): + cfg_file_name = '/etc/hpedockerplugin/hpe.conf' + else: + cfg_file_name = './test/config/hpe_%s.conf' % \ + self._protocol.lower() cfg_param = ['--config-file', cfg_file_name] try: host_config = setupcfg.get_host_config(cfg_param) - all_configs = setupcfg.get_all_backend_configs(cfg_param) + backend_configs = setupcfg.get_all_backend_configs(cfg_param) except Exception as ex: msg = 'Setting up of hpe3pardocker unit test failed, error is: ' \ '%s' % six.text_type(ex) # LOG.error(msg) raise exception.HPEPluginStartPluginException(reason=msg) + all_configs = self._rearrange_configs(host_config, backend_configs) + # _protocol is set in the immediate child class # config = create_configuration(self._protocol) # Allow child classes to override configuration self.override_configuration(all_configs) - return host_config, all_configs + return all_configs + + def _rearrange_configs(self, host_config, backend_configs): + file_driver = 'hpedockerplugin.hpe.hpe_3par_file.HPE3PARFileDriver' + fc_driver = 'hpedockerplugin.hpe.hpe_3par_fc.HPE3PARFCDriver' + iscsi_driver = 'hpedockerplugin.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver' + # backend_configs -> {'backend1': config1, 'backend2': config2, ...} + # all_configs -> {'block': backend_configs1, 'file': backend_configs2} + file_configs = {} + block_configs = {} + all_configs = {} + for backend_name, config in 
backend_configs.items(): + configured_driver = config.hpedockerplugin_driver.strip() + if configured_driver == file_driver: + file_configs[backend_name] = config + elif configured_driver == fc_driver or \ + configured_driver == iscsi_driver: + block_configs[backend_name] = config + else: + msg = "Bad driver name specified in hpe.conf: %s" %\ + configured_driver + raise exception.HPEPluginStartPluginException(reason=msg) + + if file_configs: + all_configs['file'] = (host_config, file_configs) + if block_configs: + all_configs['block'] = (host_config, block_configs) + + return all_configs """ Allows the child class to override the HPE configuration parameters diff --git a/test/listvolume_tester.py b/test/listvolume_tester.py new file mode 100644 index 00000000..fafe5672 --- /dev/null +++ b/test/listvolume_tester.py @@ -0,0 +1,76 @@ +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest +from oslo_config import cfg +CONF = cfg.CONF + + +class ListVolumeUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def _get_plugin_api(self): + return 'volumedriver_list' + + def get_request_params(self): + return {} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_all_vols.return_value = [] + + def override_configuration(self, config): + pass + + # TODO: check_response and setup_mock_objects can be implemented + # here for the normal happy path TCs here as they are same + + +class TestListNoVolumes(ListVolumeUnitTest): + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_all_vols.assert_called() + + +class TestListVolumeDefault(ListVolumeUnitTest): + def check_response(self, resp): + expected_vols = [ + { + 'Devicename': 
'', + 'Mountpoint': '', + 'Name': 'test-vol-001', + 'Status': {}, + 'size': 310 + }, + { + 'Devicename': '', + 'Mountpoint': '', + 'Name': 'test-vol-002', + 'Status': {}, + 'size': 555 + } + ] + + self._test_case.assertEqual(resp, {u"Err": '', + 'Volumes': expected_vols}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_all_vols.assert_called() + mock_etcd.get_path_info_from_vol.assert_called() + self._test_case.assertEqual( + mock_etcd.get_path_info_from_vol.call_count, 2) + + def get_request_params(self): + return {} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_all_vols.return_value = data.vols_list diff --git a/test/mountshare_tester.py b/test/mountshare_tester.py new file mode 100644 index 00000000..46a45d57 --- /dev/null +++ b/test/mountshare_tester.py @@ -0,0 +1,91 @@ +import copy + +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest + + +class MountShareUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def __init__(self): + self._backend_name = None + + def _get_plugin_api(self): + return 'volumedriver_mount' + + def get_request_params(self): + return {"Name": 'DemoShare-99', + "ID": "Fake-Mount-ID"} + + def setup_mock_objects(self): + def _setup_mock_3parclient(): + self.setup_mock_3parclient() + + def _setup_mock_etcd(): + # Allow child class to make changes + self.setup_mock_etcd() + + _setup_mock_3parclient() + _setup_mock_etcd() + + def setup_mock_3parclient(self): + pass + + def setup_mock_etcd(self): + pass + + def setup_mock_fileutil(self): + pass + + +class TestMountNfsShare(MountShareUnitTest): + def __init__(self, **kwargs): + super(type(self), self).__init__(**kwargs) + self._share = copy.deepcopy(data.etcd_share) + + def setup_mock_etcd(self): + 
mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_share_etcd.get_share.return_value = self._share + + def check_response(self, resp): + pass + # mnt_point = '/opt/hpe/data/hpedocker-GoodShare' + # dev_name = '192.168.98.41:/DockerFpg_2/DockerVfs_2/GoodShare' + # expected = { + # 'Mountpoint': mnt_point, + # 'Err': '', + # 'Name': 'GoodShare', + # 'Devicename': dev_name} + # expected_keys = ["Mountpoint", "Name", "Err", "Devicename"] + # for key in expected_keys: + # self._test_case.assertIn(key, resp) + # + # self._test_case.assertEqual(resp, expected) + + +class TestMountNfsShareWithAcl(MountShareUnitTest): + def __init__(self, **kwargs): + super(type(self), self).__init__(**kwargs) + self._share = copy.deepcopy(data.etcd_share_with_acl) + + def setup_mock_etcd(self): + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_share_etcd.get_share.return_value = self._share + mock_file_client = self.mock_objects['mock_file_client'] + mock_file_client._run.side_effect = [ + data.show_fs_user_resp, + data.show_fs_group_resp + ] + + def check_response(self, resp): + pass + # mnt_point = '/opt/hpe/data/hpedocker-GoodShare' + # dev_name = '192.168.98.41:/DockerFpg_2/DockerVfs_2/GoodShare' + # expected = { + # 'Mountpoint': mnt_point, + # 'Err': '', + # 'Name': 'GoodShare', + # 'Devicename': dev_name} + # expected_keys = ["Mountpoint", "Name", "Err", "Devicename"] + # for key in expected_keys: + # self._test_case.assertIn(key, resp) + # + # self._test_case.assertEqual(resp, expected) diff --git a/test/mountvolume_tester.py b/test/mountvolume_tester.py index 066cf8dd..436769ed 100644 --- a/test/mountvolume_tester.py +++ b/test/mountvolume_tester.py @@ -6,10 +6,24 @@ class MountVolumeUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): - def __init__(self, is_snap=False): + def __init__(self, is_snap=False, vol_params=None): + self._backend_name = None + self._vol_type = None + self._rep_type = None self._is_snap = is_snap + self._rcg_state = None if not 
is_snap: - self._vol = copy.deepcopy(data.volume) + if vol_params: + self._rcg_state = vol_params.get('rcg_state') + self._vol_type = vol_params['vol_type'] + if self._vol_type == 'replicated': + self._rep_type = vol_params['rep_type'] + if self._rep_type == 'active-passive': + self._backend_name = '3par_ap_sync_rep' + self._vol = copy.deepcopy(data.replicated_volume) + self._vol['backend'] = self._backend_name + else: + self._vol = copy.deepcopy(data.volume) else: self._vol = copy.deepcopy(data.snap1) @@ -17,13 +31,51 @@ def _get_plugin_api(self): return 'volumedriver_mount' def get_request_params(self): + opts = {'mount-volume': 'True'} + if self._backend_name: + opts['backend'] = self._backend_name return {"Name": self._vol['display_name'], "ID": "Fake-Mount-ID", - "Opts": {'mount-volume': 'True'}} + "Opts": opts} def setup_mock_objects(self): def _setup_mock_3parclient(): # Allow child class to make changes + if self._rep_type == 'active-passive': + mock_3parclient = self.mock_objects['mock_3parclient'] + if self._rcg_state == 'normal': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + data.normal_rcg['primary_3par_rcg'], + data.normal_rcg['secondary_3par_rcg'] + ] + elif self._rcg_state == 'failover': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + data.failover_rcg['primary_3par_rcg'], + data.failover_rcg['secondary_3par_rcg'] + ] + elif self._rcg_state == 'recover': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + data.recover_rcg['primary_3par_rcg'], + data.recover_rcg['secondary_3par_rcg'] + ] + elif self._rcg_state == 'rcgs_not_gettable': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + exceptions.HTTPNotFound("Primary RCG not found"), + exceptions.HTTPNotFound("Secondary RCG not found"), + ] + elif self._rcg_state == 'only_primary_rcg_gettable': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + data.normal_rcg['primary_3par_rcg'], + exceptions.HTTPNotFound("Secondary RCG not found"), + ] + elif self._rcg_state == 
'only_secondary_rcg_gettable': + mock_3parclient.getRemoteCopyGroup.side_effect = [ + exceptions.HTTPNotFound("Primary RCG not found"), + data.failover_rcg['secondary_3par_rcg'], + ] + else: + raise Exception("Invalid rcg_state specified") + self.setup_mock_3parclient() def _setup_mock_etcd(): @@ -111,43 +163,52 @@ def setup_mock_3parclient(self): def check_response(self, resp): # resp -> {"Mountpoint": "/tmp", "Name": "test-vol-001", # "Err": "", "Devicename": "/tmp"} - expected_keys = ["Mountpoint", "Name", "Err", "Devicename"] - for key in expected_keys: - self._test_case.assertIn(key, resp) - - # resp -> {u'Mountpoint': u'/tmp', u'Name': u'test-vol-001', - # u'Err': u'', u'Devicename': u'/tmp'} - self._test_case.assertEqual(resp['Mountpoint'], u'/tmp') - self._test_case.assertEqual(resp['Name'], self._vol['display_name']) - self._test_case.assertEqual(resp['Err'], u'') - self._test_case.assertEqual(resp['Devicename'], u'/tmp') + # In case of 'rcgs_not_gettable', 'Err' is returned + if self._rcg_state == 'rcgs_not_gettable': + expected = {'Err': "Remote copy group 'TEST-RCG' not found"} + self._test_case.assertEqual(resp, expected) + else: + expected_keys = ["Mountpoint", "Name", "Err", "Devicename"] + for key in expected_keys: + self._test_case.assertIn(key, resp) + + # resp -> {u'Mountpoint': u'/tmp', u'Name': u'test-vol-001', + # u'Err': u'', u'Devicename': u'/tmp'} + self._test_case.assertEqual(resp['Mountpoint'], u'/tmp') + self._test_case.assertEqual(resp['Name'], + self._vol['display_name']) + self._test_case.assertEqual(resp['Err'], u'') + self._test_case.assertEqual(resp['Devicename'], u'/tmp') # Check if these functions were actually invoked # in the flow or not + mock_etcd = self.mock_objects['mock_etcd'] mock_3parclient = self.mock_objects['mock_3parclient'] mock_3parclient.getWsApiVersion.assert_called() - mock_3parclient.getVolume.assert_called() - mock_3parclient.getCPG.assert_called() - mock_3parclient.getHost.assert_called() - 
mock_3parclient.queryHost.assert_called() - # mock_3parclient.getPorts.assert_called() - mock_3parclient.getHostVLUNs.assert_called() - mock_3parclient.createVLUN.assert_called() + if self._rcg_state != 'rcgs_not_gettable': + mock_3parclient.getVolume.assert_called() + mock_3parclient.getCPG.assert_called() + mock_3parclient.getHost.assert_called() + mock_3parclient.queryHost.assert_called() + # mock_3parclient.getPorts.assert_called() + mock_3parclient.getHostVLUNs.assert_called() + mock_3parclient.createVLUN.assert_called() - mock_fileutil = self.mock_objects['mock_fileutil'] - mock_fileutil.has_filesystem.assert_called() - mock_fileutil.create_filesystem.assert_called() - mock_fileutil.mkdir_for_mounting.assert_called() - mock_fileutil.mount_dir.assert_called() - # lost+found directory removed or not - mock_fileutil.remove_dir.assert_called() + mock_fileutil = self.mock_objects['mock_fileutil'] + mock_fileutil.has_filesystem.assert_called() + mock_fileutil.create_filesystem.assert_called() + mock_fileutil.mkdir_for_mounting.assert_called() + mock_fileutil.mount_dir.assert_called() + # lost+found directory removed or not + mock_fileutil.remove_dir.assert_called() - mock_etcd = self.mock_objects['mock_etcd'] - mock_etcd.get_vol_byname.assert_called() - mock_etcd.update_vol.assert_called() + mock_etcd.update_vol.assert_called() - mock_protocol_connector = self.mock_objects['mock_protocol_connector'] - mock_protocol_connector.connect_volume.assert_called() + mock_protocol_connector = \ + self.mock_objects['mock_protocol_connector'] + mock_protocol_connector.connect_volume.assert_called() + + mock_etcd.get_vol_byname.assert_called() # Host not registered with supplied name @@ -516,8 +577,8 @@ def setup_mock_osbrick_connector(self): data.connector def override_configuration(self, all_configs): - all_configs['DEFAULT'].hpe3par_iscsi_chap_enabled = True - all_configs['DEFAULT'].use_multipath = False + all_configs['block'][1]['DEFAULT'].hpe3par_iscsi_chap_enabled = True 
+ all_configs['block'][1]['DEFAULT'].use_multipath = False def check_response(self, resp): # resp -> {u'Mountpoint': u'/tmp', u'Name': u'test-vol-001', @@ -643,7 +704,7 @@ def setup_mock_3parclient(self): def override_configuration(self, all_configs): # config.hpe3par_iscsi_chap_enabled = True - all_configs['DEFAULT'].use_multipath = False + all_configs['block'][1]['DEFAULT'].use_multipath = False def check_response(self, resp): # resp -> {u'Mountpoint': u'/tmp', u'Name': u'test-vol-001', @@ -807,6 +868,7 @@ def __init__(self, **kwargs): self._unmounted_vol['is_snap'] = True self._unmounted_vol['display_name'] = data.SNAPSHOT_NAME1 self._unmounted_vol['id'] = data.SNAPSHOT_ID1 + self._unmounted_vol['snap_metadata'] = data.snap1_metadata def setup_mock_etcd(self): mock_etcd = self.mock_objects['mock_etcd'] @@ -920,6 +982,9 @@ def check_response(self, resp): self._test_case.assertEqual(resp['Err'], u'') self._test_case.assertEqual(resp['Devicename'], u'/tmp') + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.update_vol.assert_called() + # Check if these functions were actually invoked # in the flow or not mock_3parclient = self.mock_objects['mock_3parclient'] diff --git a/test/removesnapshot_tester.py b/test/removesnapshot_tester.py index 56c0313d..b930f703 100644 --- a/test/removesnapshot_tester.py +++ b/test/removesnapshot_tester.py @@ -38,6 +38,24 @@ def setup_mock_objects(self): ] +class TestRemoveSnapshotSchedule(RemoveSnapshotUnitTest): + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + def get_request_params(self): + return {"Name": data.snap4['display_name']} + + def setup_mock_objects(self): + parent_vol = copy.deepcopy(data.volume_with_snap_schedule) + snapshot = copy.deepcopy(data.snap4) + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.side_effect = [ + snapshot, + snapshot, + parent_vol + ] + + # # Tries to remove a snapshot present at the second level # # This shouldn't even enter driver 
code # class TestRemoveMultilevelSnapshot(RemoveSnapshotUnitTest): @@ -89,7 +107,6 @@ def setup_mock_objects(self): mock_etcd.get_vol_byname.return_value = None def check_response(self, resp): - # expected = {u'Err': u'snapshot %s does not exist!' - # % self.snapshot_name} - expected = {u'Err': u''} + msg = 'Volume name to remove not found: %s' % self.snapshot_name + expected = {u'Err': msg} self._test_case.assertEqual(expected, resp) diff --git a/test/removevolume_tester.py b/test/removevolume_tester.py new file mode 100644 index 00000000..20233765 --- /dev/null +++ b/test/removevolume_tester.py @@ -0,0 +1,154 @@ +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest +import copy + +from oslo_config import cfg +CONF = cfg.CONF + + +class RemoveVolumeUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def _get_plugin_api(self): + return 'volumedriver_remove' + + def override_configuration(self, all_configs): + pass + + +class TestRemoveVolume(RemoveVolumeUnitTest): + + def __init__(self, test_obj): + self._test_obj = test_obj + + def get_request_params(self): + return self._test_obj.get_request_params() + + def setup_mock_objects(self): + self._test_obj.setup_mock_objects(self.mock_objects) + + def check_response(self, resp): + self._test_obj.check_response(resp, self.mock_objects, + self._test_case) + + # Nested class to handle regular volume + class Regular(object): + def get_request_params(self): + vol_name = data.volume['name'] + return {"Name": vol_name, + "Opts": {}} + + def setup_mock_objects(self, mock_objects): + mock_etcd = mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = copy.deepcopy(data.volume) + + def check_response(self, resp, mock_objects, test_case): + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + + mock_3parclient.deleteVolume.assert_called() + + mock_etcd = 
mock_objects['mock_etcd'] + mock_etcd.delete_vol.assert_called() + + # REPLICATED VOLUME + class ReplicatedVolume(object): + def __init__(self, params): + self._params = params + + def get_request_params(self): + vol_name = data.replicated_volume['name'] + return {"Name": vol_name, + "Opts": {}} + + def setup_mock_objects(self, mock_objects): + mock_etcd = mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = data.replicated_volume + + mock_3parclient = mock_objects['mock_3parclient'] + + if self._params.get('rm_last_volume'): + # Simulate that this is the last volume + mock_3parclient.getRemoteCopyGroup.side_effect = [ + {'role': self._params['role']}, + {'volumes': []} + ] + else: + # Simulate that this is NOT the last volume + mock_3parclient.getRemoteCopyGroup.side_effect = [ + {'role': self._params['role']}, + {'volumes': ['dummy-vol1', 'dummy-vol2']} + ] + + def check_response(self, resp, mock_objects, test_case): + if self._params['role'] == data.ROLE_PRIMARY: + test_case.assertEqual(resp, {u"Err": ''}) + + mock_3parclient = mock_objects['mock_3parclient'] + mock_3parclient.getRemoteCopyGroup.assert_called() + mock_3parclient.stopRemoteCopy.assert_called() + mock_3parclient.removeVolumeFromRemoteCopyGroup.assert_called() + mock_3parclient.deleteVolume.assert_called() + if self._params.get('rm_last_volume'): + mock_3parclient.removeRemoteCopyGroup.assert_called() + else: + mock_3parclient.removeRemoteCopyGroup.assert_not_called() + mock_3parclient.startRemoteCopy.asssert_called() + + mock_etcd = mock_objects['mock_etcd'] + mock_etcd.delete_vol.assert_called() + else: + msg = "Error: Failed to delete volume: %(vol)s as rcg: " \ + "%(rcg)s do not have valid role" % { + 'vol': data.VOLUME_3PAR_NAME, + 'rcg': data.replicated_volume['rcg_info'][ + 'local_rcg_name']} + test_case.assertEqual(resp, {u"Err": msg}) + + +class TestRemoveNonExistentVolume(RemoveVolumeUnitTest): + def get_request_params(self): + return {"Name": data.VOLUME_NAME, + "Opts": 
{}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + # Return None to simulate volume doesnt' exist + mock_etcd.get_vol_byname.return_value = None + + def check_response(self, resp): + msg = 'Volume name to remove not found: %s' % data.VOLUME_NAME + self._test_case.assertEqual(resp, {u"Err": msg}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.deleteVolume.assert_not_called() + + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.delete_vol.assert_not_called() + + +class TestRemoveVolumeWithChildSnapshot(RemoveVolumeUnitTest): + def get_request_params(self): + return {"Name": data.VOLUME_NAME, + "Opts": {}} + + def setup_mock_objects(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = data.volume_with_snapshots + + def check_response(self, resp): + msg = 'Err: Volume %s has one or more child snapshots - volume ' \ + 'cannot be deleted!' 
% data.VOLUME_NAME + self._test_case.assertEqual(resp, {u"Err": msg}) + + # Check if these functions were actually invoked + # in the flow or not + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.deleteVolume.assert_not_called() + + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.delete_vol.assert_not_called() diff --git a/test/setup_mock.py b/test/setup_mock.py index 5daa5131..2eafc826 100644 --- a/test/setup_mock.py +++ b/test/setup_mock.py @@ -1,15 +1,26 @@ import mock +from hpe3parclient import http import test.fake_3par_data as data from hpedockerplugin.hpe import hpe_3par_common as hpecommon +from hpedockerplugin.hpe import hpe_3par_mediator as hpe_3par_mediator +from hpedockerplugin.hpe import utils from hpedockerplugin import volume_manager as mgr from hpedockerplugin import backend_orchestrator as orch +from hpedockerplugin import file_backend_orchestrator as f_orch from oslo_config import cfg CONF = cfg.CONF def mock_decorator(func): + @mock.patch( + 'hpedockerplugin.file_manager.sh' + ) + @mock.patch( + 'hpedockerplugin.file_manager.os', + spec=True + ) @mock.patch( 'hpedockerplugin.volume_manager.connector.FibreChannelConnector', spec=True @@ -23,15 +34,31 @@ def mock_decorator(func): spec=True ) @mock.patch( - 'hpedockerplugin.volume_manager.util.EtcdUtil', + 'hpedockerplugin.backend_orchestrator.util.EtcdUtil', + spec=True + ) + @mock.patch( + 'hpedockerplugin.file_backend_orchestrator.util.' + 'HpeFilePersonaEtcdClient', + spec=True + ) + @mock.patch( + 'hpedockerplugin.file_backend_orchestrator.util.' + 'HpeShareEtcdClient', spec=True ) @mock.patch( 'hpedockerplugin.hpe.hpe_3par_common.client.HPE3ParClient', - spec=True, + spec=True + ) + @mock.patch( + 'hpedockerplugin.hpe.hpe_3par_mediator.file_client.' 
+ 'HPE3ParFilePersonaClient', spec=True ) - def setup_mock_wrapper(self, mock_3parclient, mock_etcd, mock_fileutil, - mock_iscsi_connector, mock_fc_connector, + def setup_mock_wrapper(self, mock_file_client, mock_3parclient, + mock_share_etcd, mock_fp_etcd, mock_etcd, + mock_fileutil, mock_iscsi_connector, + mock_fc_connector, mock_os, mock_sh, *args, **kwargs): # Override the value as without it it throws an exception CONF.set_override('ssh_hosts_key_file', @@ -49,27 +76,53 @@ def setup_mock_wrapper(self, mock_3parclient, mock_etcd, mock_fileutil, with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') \ as mock_create_client, \ - mock.patch.object(orch.Orchestrator, '_get_etcd_util') \ - as mock_get_etcd_util, \ + mock.patch.object(orch.VolumeBackendOrchestrator, + '_get_etcd_client') \ + as _get_etcd_client, \ mock.patch.object(mgr.VolumeManager, '_get_connector') \ as mock_get_connector, \ mock.patch('hpedockerplugin.volume_manager.connector') \ as mock_osbricks_connector, \ - mock.patch.object(mgr.VolumeManager, '_get_node_id') \ + mock.patch.object(orch.VolumeBackendOrchestrator, + '_get_node_id') \ as mock_get_node_id, \ - mock.patch.object(mgr.VolumeManager, '_decrypt_password') \ - as mock_decrypt_password: + mock.patch.object(f_orch.FileBackendOrchestrator, + '_get_node_id') \ + as mock_file_get_node_id, \ + mock.patch.object(utils.PasswordDecryptor, + 'decrypt_password') \ + as mock_decrypt_password, \ + mock.patch.object(f_orch.FileBackendOrchestrator, + '_get_etcd_client') \ + as mock_get_etcd_client, \ + mock.patch.object(f_orch.FileBackendOrchestrator, + '_get_fp_etcd_client') \ + as mock_get_fp_etcd_client, \ + mock.patch.object(hpe_3par_mediator.HPE3ParMediator, + '_create_client') \ + as mock_create_file_client: mock_create_client.return_value = mock_3parclient - mock_get_etcd_util.return_value = mock_etcd + _get_etcd_client.return_value = mock_etcd mock_get_connector.return_value = mock_protocol_connector mock_get_node_id.return_value = 
data.THIS_NODE_ID + mock_file_get_node_id.return_value = data.THIS_NODE_ID mock_decrypt_password.return_value = data.HPE3PAR_USER_PASS + mock_create_file_client.return_value = mock_file_client + mock_get_etcd_client.return_value = mock_share_etcd + mock_get_fp_etcd_client.return_value = mock_fp_etcd + mock_file_client.http = mock.Mock(spec=http.HTTPJSONRESTClient) - mock_objects = \ - {'mock_3parclient': mock_3parclient, - 'mock_fileutil': mock_fileutil, - 'mock_osbricks_connector': mock_osbricks_connector, - 'mock_protocol_connector': mock_protocol_connector, - 'mock_etcd': mock_etcd} + mock_objects = { + 'mock_3parclient': mock_3parclient, + 'mock_file_client': mock_file_client, + 'mock_fileutil': mock_fileutil, + 'mock_osbricks_connector': mock_osbricks_connector, + 'mock_protocol_connector': mock_protocol_connector, + 'mock_etcd': mock_etcd, + 'mock_share_etcd': mock_share_etcd, + 'mock_fp_etcd': mock_fp_etcd, + 'mock_os': mock_os, + 'mock_sh': mock_sh + } return func(self, mock_objects, *args, **kwargs) return setup_mock_wrapper diff --git a/test/test_hpe_plugin.py b/test/test_hpe_plugin.py deleted file mode 100644 index 29d5f52e..00000000 --- a/test/test_hpe_plugin.py +++ /dev/null @@ -1,457 +0,0 @@ -# (c) Copyright [2016] Hewlett Packard Enterprise Development LP -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import time - -from io import BytesIO - -from zope.interface import implementer - -from twisted.internet.endpoints import UNIXClientEndpoint -from twisted.web.iweb import IAgentEndpointFactory -from twisted.web.client import Agent, readBody, FileBodyProducer - -from twisted.internet import reactor -from twisted.web.http_headers import Headers -import json -from json import dumps - -from twisted.trial import unittest -import subprocess -from sh import cat -from sh import kill - -from config.setupcfg import getdefaultconfig, setup_logging -from oslo_log import log as logging - -LOG = logging.getLogger(__name__) - -CONFIG_FILE = '/etc/hpedockerplugin/hpe.conf' -CONFIG = ['--config-file', CONFIG_FILE] - -TEST_DIR = os.path.abspath('../') -TWISTD_PID = TEST_DIR + '/twistd.pid' - -hpe_sock_path = b"/run/docker/plugins/hpe/hpe.sock" - - -@implementer(IAgentEndpointFactory) -class HPEEndpointFactory(object): - """ - Connect to hpe3's Unix socket. - """ - def __init__(self): - self.reactor = reactor - - def endpointForURI(self, uri): - return UNIXClientEndpoint(self.reactor, hpe_sock_path) - - -class HPEPLUGINTESTS(unittest.TestCase): - def _wait_for_pid_file(self, filename, wait_time): - count = 0 - while not os.path.exists(filename): - if count == wait_time: - break - time.sleep(1) - count += 1 - - if os.path.isfile(filename): - self.twistd_pid = cat(filename) - print('self.twistd_pid: %d ' % (self.twistd_pid)) - else: - raise ValueError("%s isn't a file!" 
% filename) - - def checkResponse(self, response, exp_result): - # TODO: convert to log messages - """ - print 'Response version:', response.version - print 'Response code:', response.code - print 'Response phrase:', response.phrase - print 'Response headers:' - print pformat(list(response.headers.getAllRawHeaders())) - """ - """ - LOG.debug("Response Body %s", str(response.version)) - LOG.debug("Response Body %s", str(response.code)) - LOG.debug("Response Body %s", str(response.phrase)) - LOG.debug("Response Body %s", - str(list(response.headers.getAllRawHeaders()))) - LOG.debug("Expected Results %s", str(exp_result)) - """ - - d = readBody(response) - d.addCallback(self.assertResponse, exp_result) - return d - - def getResponse(self, response): - # TODO: convert to log messages - """ - print 'Response version:', response.version - print 'Response code:', response.code - print 'Response phrase:', response.phrase - print 'Response headers:' - print pformat(list(response.headers.getAllRawHeaders())) - """ - """ - LOG.debug("Response Body %s", str(response.version)) - LOG.debug("Response Body %s", str(response.code)) - LOG.debug("Response Body %s", str(response.phrase)) - LOG.debug("Response Body %s", - str(list(response.headers.getAllRawHeaders()))) - LOG.debug("Expected Results %s", str(exp_result)) - """ - - d = readBody(response) - return d - - def assertResponse(self, body, exp_result): - LOG.debug("Response Body %s", str(body)) - LOG.debug("Expected Results %s", str(exp_result)) - self.assertEqual(body, exp_result) - - def cbFailed(self, failure): - LOG.error("Test Failed %s", str(failure)) - self.fail(msg='Test Failed') - - """ - Connect to hpe3's Unix socket. - """ - def setUp(self): - # Setup Test Logging - # Set Logging level - # Setup the default, hpe3parconfig, and hpelefthandconfig - # configuration objects. 
- hpedefaultconfig = getdefaultconfig(CONFIG) - - logging_level = hpedefaultconfig.logging - setup_logging('test_hpe_plugin', logging_level) - - # Start HPE Docker Plugin - bashcommand = "/bin/twistd hpe_plugin_service" - try: - subprocess.check_output(['sh', '-c', bashcommand], cwd=TEST_DIR) - except Exception: - LOG.error("Test Setup Failed: Could not change dir") - self.fail(msg='Test Failed') - - self._wait_for_pid_file(TWISTD_PID, 5) - - def tearDown(self): - # Stop HPE Docker Plugin - kill(str(self.twistd_pid)) - - is_running = os.path.exists("/proc/%s" % str(self.twistd_pid)) - while is_running: - is_running = os.path.exists("/proc/%s" % str(self.twistd_pid)) - time.sleep(0.25) - - def test_hpe_activate(self): - path = b"/Plugin.Activate" - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path) - d.addCallback(self.checkResponse, json.dumps({u"Implements": - [u"VolumeDriver"]})) - d.addErrback(self.cbFailed) - return d - - def test_hpe_create_volume(self): - name = 'test-create-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name, - u"Opts": None} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - return d - - def test_hpe_create_volume_size_option(self): - name = 'test-create-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name, - u"Opts": {u"size": u"50"}} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - 
d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addCallback(self._remove_volume_callback, name) - d.addErrback(self.cbFailed) - return d - - def test_hpe_create_volume_provisioning_option(self): - name = 'test-create-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name, - u"Opts": {u"provisioning": u"full"}} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addCallback(self._remove_volume_callback, name) - d.addErrback(self.cbFailed) - return d - - def test_hpe_create_volume_invalid_provisioning_option(self): - name = 'test-create-volume-fake' - path = b"/VolumeDriver.Create" - body = {u"Name": name, - u"Opts": {u"provisioning": u"fake"}} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({ - u"Err": "Invalid input received: Must specify a valid " + - "provisioning type ['thin', 'full', " + - "'dedup'], value 'fake' is invalid."})) - d.addCallback(self._remove_volume_callback, name) - d.addErrback(self.cbFailed) - return d - - def test_hpe_create_volume_invalid_option(self): - name = 'test-create-volume-fake' - path = b"/VolumeDriver.Create" - body = {u"Name": name, - u"Opts": {u"fake": u"fake"}} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - 
d.addCallback(self.checkResponse, json.dumps({ - u"Err": "create volume failed, error is: fake is not a valid " - "option. Valid options are: ['size', 'provisioning', " - "'flash-cache']"})) - d.addCallback(self._remove_volume_callback, name) - d.addErrback(self.cbFailed) - return d - - def _remove_volume_callback(self, body, name): - # NOTE: body arg is the result from last deferred call. - # Python complains about parameter mis-match if you don't include it - return self._remove_volume(name) - - def _remove_volume(self, name): - path = b"/VolumeDriver.Remove" - body = {u"Name": name} - - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - return d - - def test_hpe_remove_volume(self): - name = 'test-create-volume' - return self._remove_volume(name) - - def _get_volume_mount_path(self, body, name): - # NOTE: body arg is the result from last deferred call. - # Python complains about parameter mis-match if you don't include it - # In this test, we need it to compare expected results with Path - # request - - # Compare path returned by mount (body) with Get Path request - path = b"/VolumeDriver.Path" - newbody = {u"Name": name} - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(newbody))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, body) - d.addErrback(self.cbFailed) - return d - - def _mount_the_volume(self, body, name): - # NOTE: body arg is the result from last deferred call. 
- # Python complains about parameter mis-match if you don't include it - - # Mount the previously created volume - path = b"/VolumeDriver.Mount" - newbody = {u"Name": name} - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(newbody))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - - d.addCallback(self.getResponse) - - # If we get a valid response from Path request then we assume - # the mount passed. - # TODO: Add additonal logic to verify the mountpath - d.addCallback(self._get_volume_mount_path, name) - return d - - def _unmount_the_volume(self, body, name): - # NOTE: body arg is the result from last deferred call. - # Python complains about parameter mis-match if you don't include it - path = b"/VolumeDriver.Unmount" - newbody = {u"Name": name} - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(newbody))) - - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - return d - - def broken_test_hpe_mount_umount_volume(self): - name = 'test-mount-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name} - - # Create a volume to be mounted - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - - # Mount the previously created volume - d.addCallback(self._mount_the_volume, name) - - # UMount the previously created volume - 
d.addCallback(self._unmount_the_volume, name) - - # Remove the previously created volume - d.addCallback(self._remove_volume_callback, name) - return d - - def test_hpe_get_volume(self): - name = 'test-get-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name} - - # Create a volume to be mounted - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - - # Get the previously created volume - expected = {u"Volume": {u"Status": {}, - u"Mountpoint": '', - u"Name": name}, - u"Err": ''} - d.addCallback(self._get_volume, name, expected) - - # Remove the previously created volume - d.addCallback(self._remove_volume_callback, name) - return d - - def test_hpe_get_non_existent_volume(self): - name = 'test-get-volume' - - # Get the previously created volume - expected = {u"Err": ''} - d = self._get_volume({}, name, expected) - - return d - - def _get_volume(self, body, name, expected): - path = b"/VolumeDriver.Get" - body = {u"Name": name} - - # Get a volume - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps(expected)) - d.addErrback(self.cbFailed) - - return d - - def broken_test_hpe_list_volume(self): - name = 'test-list-volume' - path = b"/VolumeDriver.Create" - body = {u"Name": name} - - # Create a volume to be mounted - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, 
HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": ''})) - d.addErrback(self.cbFailed) - - # List volumes - expected = {u"Err": '', - u"Volumes": [{u"Mountpoint": '', - u"Name": name}]} - d.addCallback(self._list_volumes, name, expected) - - # Remove the previously created volume - d.addCallback(self._remove_volume_callback, name) - - return d - - def broken_test_hpe_list_volume_no_volumes(self): - path = b"/VolumeDriver.List" - - # Create a volume to be mounted - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps({}))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps({u"Err": '', - u"Volumes": []})) - d.addErrback(self.cbFailed) - - return d - - def _list_volumes(self, body, name, expected): - path = b"/VolumeDriver.List" - body = {u"Name": name} - - # Get a volume - headers = Headers({b"content-type": [b"application/json"]}) - body_producer = FileBodyProducer(BytesIO(dumps(body))) - agent = Agent.usingEndpointFactory(reactor, HPEEndpointFactory()) - d = agent.request(b'POST', b"UNIX://localhost" + path, headers, - body_producer) - d.addCallback(self.checkResponse, json.dumps(expected)) - d.addErrback(self.cbFailed) - - return d diff --git a/test/test_hpe_plugin_enable_disable.py b/test/test_hpe_plugin_enable_disable.py new file mode 100644 index 00000000..bd22237e --- /dev/null +++ b/test/test_hpe_plugin_enable_disable.py @@ -0,0 +1,51 @@ +import logging +import testtools + +import test.enableplugin_tester as enableplugin_tester + +logger = logging.getLogger('hpedockerplugin') +logger.level = logging.DEBUG +fh = logging.FileHandler('./unit_tests_run.log') +fh.setLevel(logging.DEBUG) +fmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s') 
+fh.setFormatter(fmt) +logger.addHandler(fh) + + +def tc_banner_decorator(func): + def banner_wrapper(self, *args, **kwargs): + # logger = logging.getLogger(__name__) + logger.info('Starting - %s' % func.__name__) + logger.info('========================================================' + '===========') + func(self, *args, **kwargs) + logger.info('Finished - %s' % func.__name__) + logger.info('========================================================' + '===========\n\n') + return banner_wrapper + + +class HpeDockerEnableDisableUnitTests(object): + @tc_banner_decorator + def test_enable(self): + test = enableplugin_tester.TestEnablePlugin() + test.run_test(self) + + @tc_banner_decorator + def test_plugin_init_fails(self): + test = enableplugin_tester.TestPluginInitializationFails() + test.run_test(self) + + +class HpeDockerMixedIscsiDefaultUnitTest(HpeDockerEnableDisableUnitTests, + testtools.TestCase): + @property + def protocol(self): + return 'mixed_iscsi_default' + + +class HpeDockerMixedFcDefaultUnitTest(HpeDockerEnableDisableUnitTests, + testtools.TestCase): + @property + def protocol(self): + return 'mixed_fc_default' diff --git a/test/test_hpe_plugin_v2.py b/test/test_hpe_plugin_v2.py index 39ea7a95..c1d511e7 100644 --- a/test/test_hpe_plugin_v2.py +++ b/test/test_hpe_plugin_v2.py @@ -1,22 +1,36 @@ import logging import testtools +from config import setupcfg +from hpedockerplugin.hpe import hpe3par_opts as plugin_opts + +import test.createshare_tester as createshare_tester import test.createvolume_tester as createvolume_tester import test.createreplicatedvolume_tester as createrepvolume_tester import test.clonevolume_tester as clonevolume_tester import test.createsnapshot_tester as createsnapshot_tester +import test.deleteshare_tester as deleteshare_tester +import test.fake_3par_data as data import test.getvolume_tester as getvolume_tester +import test.listvolume_tester as listvolume_tester +import test.mountshare_tester as mountshare_tester import 
test.mountvolume_tester as mountvolume_tester import test.removesnapshot_tester as removesnapshot_tester +import test.removevolume_tester as removevolume_tester + # import revertsnapshot_tester +import test.unmountshare_tester as unmountshare_tester import test.unmountvolume_tester as unmountvolume_tester logger = logging.getLogger('hpedockerplugin') logger.level = logging.DEBUG fh = logging.FileHandler('./unit_tests_run.log') fh.setLevel(logging.DEBUG) -fmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s') -fh.setFormatter(fmt) +# fmt = logging.Formatter('%(name)s - %(levelname)s - %(message)s') +formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] ' + '%(name)s [%(thread)d] ' + '%(threadName)s %(message)s') +fh.setFormatter(formatter) logger.addHandler(fh) BKEND_3PAR_PP_REP = '3par_pp_rep' @@ -41,6 +55,22 @@ def banner_wrapper(self, *args, **kwargs): # TODO: Make this class abstract # Base test class containing common tests class HpeDockerUnitTestsBase(object): + def _get_real_config_file(self): + return '/etc/hpedockerplugin/hpe.conf' + + def _get_test_config_file(self): + cfg_file_name = './test/config/hpe_%s.conf' % \ + self.protocol.lower() + return cfg_file_name + + def _get_configs(self, cfg_param): + host_config = setupcfg.get_host_config( + cfg_param, setupcfg.CONF) + host_config.set_override('ssh_hosts_key_file', + data.KNOWN_HOSTS_FILE) + backend_configs = setupcfg.get_all_backend_configs( + cfg_param, setupcfg.CONF, plugin_opts.hpe3par_opts) + return {'block': (host_config, backend_configs)} """ CREATE VOLUME related tests @@ -75,6 +105,21 @@ def test_import_volume_with_other_option(self): test = createvolume_tester.TestImportVolumeOtherOption() test.run_test(self) + @tc_banner_decorator + def test_import_already_managed_volume(self): + test = createvolume_tester.TestImportAlreadyManagedVolume() + test.run_test(self) + + @tc_banner_decorator + def test_import_volume_with_different_domain(self): + test = 
createvolume_tester.TestImportVolumeDifferentDomain() + test.run_test(self) + + @tc_banner_decorator + def test_import_volume_with_invalid_options(self): + test = createvolume_tester.TestImportVolumeWithInvalidOptions() + test.run_test(self) + @tc_banner_decorator def test_create_volume_with_qos(self): test = createvolume_tester.TestCreateVolumeWithQOS() @@ -85,11 +130,6 @@ def test_create_volume_with_invalid_qos(self): test = createvolume_tester.TestCreateVolumeWithInvalidQOS() test.run_test(self) - @tc_banner_decorator - def test_create_volume_with_mutually_exclusive_list(self): - test = createvolume_tester.TestCreateVolumeWithMutuallyExclusiveList() - test.run_test(self) - @tc_banner_decorator def test_create_volume_with_flashcache_and_qos(self): test = createvolume_tester.TestCreateVolumeWithFlashCacheAndQOS() @@ -137,9 +177,25 @@ def test_create_vol_set_flash_cache_fails(self): test = createvolume_tester.TestCreateVolSetFlashCacheFails() test.run_test(self) + @tc_banner_decorator + def test_create_vol_with_mutually_exclusive_opts(self): + test = createvolume_tester.\ + TestCreateVolumeWithMutuallyExclusiveOptions() + test.run_test(self) + + @tc_banner_decorator + def test_create_vol_with_invalid_options(self): + test = createvolume_tester.TestCreateVolumeWithInvalidOptions() + test.run_test(self) + """ REPLICATION related tests """ + @tc_banner_decorator + def test_create_default_replicated_volume_fails(self): + test = createrepvolume_tester.TestCreateVolumeDefaultFails() + test.run_test(self) + @tc_banner_decorator def test_create_pp_replicated_volume_and_rcg(self): test = createrepvolume_tester.TestCreateReplicatedVolumeAndRCG( @@ -192,6 +248,12 @@ def test_create_ap_streaming_replicated_volume_and_rcg_create_fails(self): backend_name=BKEND_3PAR_AP_STREAMING_REP) test.run_test(self) + @tc_banner_decorator + def test_create_replicated_vol_with_invalid_opts(self): + test = createrepvolume_tester.\ + TestCreateReplicatedVolumeWithInvalidOptions() + 
test.run_test(self) + """ CLONE VOLUME related tests """ @@ -211,8 +273,8 @@ def test_clone_offline_copy(self): test.run_test(self) @tc_banner_decorator - def test_clone_offline_copy_fails(self): - test = clonevolume_tester.TestCloneOfflineCopyFails() + def test_clone_from_base_volume_active_task(self): + test = clonevolume_tester.TestCloneFromBaseVolumeActiveTask() test.run_test(self) @tc_banner_decorator @@ -265,6 +327,11 @@ def test_clone_with_flashcache_and_qos_etcd_save_fails(self): test = clonevolume_tester.TestCloneWithFlashCacheAndQOSEtcdSaveFails() test.run_test(self) + @tc_banner_decorator + def test_clone_volume_with_invalid_options(self): + test = clonevolume_tester.TestCloneVolumeWithInvalidOptions() + test.run_test(self) + """ CREATE REVERT SNAPSHOT related tests """ @@ -307,14 +374,105 @@ def test_create_snapshot_etcd_save_fails(self): test = createsnapshot_tester.TestCreateSnapshotEtcdSaveFails() test.run_test(self) + @tc_banner_decorator + def test_create_snapshot_invalid_options(self): + test = createsnapshot_tester.TestCreateSnapshotInvalidOptions() + test.run_test(self) + + """ + CREATE SNAPSHOT SCHEDULE related tests + """ + @tc_banner_decorator + def test_create_snap_schedule(self): + test = createsnapshot_tester.TestCreateSnpSchedule() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_neg_freq(self): + test = createsnapshot_tester.TestCreateSnpSchedNegFreq() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_neg_prefx(self): + test = createsnapshot_tester.TestCreateSnpSchedNegPrefx() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_inv_prefx_len(self): + test = createsnapshot_tester.TestCreateSnpSchedInvPrefxLen() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_no_schedname(self): + test = createsnapshot_tester.TestCreateSnpSchedNoSchedName() + test.run_test(self) + + @tc_banner_decorator + def 
test_create_snap_schedule_with_ret_to_base(self): + test = createsnapshot_tester.TestCreateSnpSchedwithRetToBase() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_ret_exp_neg(self): + test = createsnapshot_tester.TestCreateSnpSchedRetExpNeg() + test.run_test(self) + + @tc_banner_decorator + def test_create_snap_schedule_inv_sched_freq(self): + test = createsnapshot_tester.TestCreateSnpSchedInvSchedFreq() + test.run_test(self) + """ REMOVE VOLUME related tests """ @tc_banner_decorator + def test_remove_regular_volume(self): + rm_regular_vol = removevolume_tester.TestRemoveVolume.Regular() + test = removevolume_tester.TestRemoveVolume(rm_regular_vol) + test.run_test(self) + + def test_remove_replicated_volume_role_primary(self): + params = {'role': data.ROLE_PRIMARY} + rm_rep_vol = removevolume_tester.TestRemoveVolume.ReplicatedVolume( + params) + test = removevolume_tester.TestRemoveVolume(rm_rep_vol) + test.run_test(self) + + def test_remove_replicated_volume_role_secondary(self): + params = {'role': data.ROLE_SECONDARY} + rm_rep_vol = removevolume_tester.TestRemoveVolume.ReplicatedVolume( + params) + test = removevolume_tester.TestRemoveVolume(rm_rep_vol) + test.run_test(self) + + def test_remove_last_replicated_volume(self): + params = {'role': data.ROLE_PRIMARY, 'rm_last_volume': True} + rm_rep_vol = removevolume_tester.TestRemoveVolume.ReplicatedVolume( + params) + test = removevolume_tester.TestRemoveVolume(rm_rep_vol) + test.run_test(self) + + def test_remove_non_existent_volume(self): + test = removevolume_tester.TestRemoveNonExistentVolume() + test.run_test(self) + + def test_remove_volume_with_child_snapshot(self): + test = removevolume_tester.TestRemoveVolumeWithChildSnapshot() + test.run_test(self) + + """ + REMOVE SNAPSHOT related tests + """ + @tc_banner_decorator def test_remove_snapshot(self): test = removesnapshot_tester.TestRemoveSnapshot() test.run_test(self) + @tc_banner_decorator + def 
test_remove_snapshot_schedule(self): + test = removesnapshot_tester.TestRemoveSnapshotSchedule() + test.run_test(self) + # @tc_banner_decorator # def test_remove_multilevel_snapshot(self): # test = removesnapshot_tester.TestRemoveMultilevelSnapshot() @@ -392,8 +550,18 @@ def test_unmount_snap_mounted_twice_on_this_node(self): # This will un-mount the volume as the last mount-id gets removed test.run_test(self) + @tc_banner_decorator + def test_unmount_vol_not_owned_by_this_node(self): + # This is a special test case which makes use of the same tester + # to execute this TC twice. The idea + # is to start with a volume which has two mount-ids i.e. it has been + # mounted twice. This TC tries to unmount it twice and checks if + # node_mount_info got removed from the volume object + test = unmountvolume_tester.TestUnmountVolNotOwnedByThisNode() + test.run_test(self) + """ - INSPECT SNAPSHOT related tests + INSPECT VOLUME/SNAPSHOT related tests """ @tc_banner_decorator def test_sync_snapshots(self): @@ -402,7 +570,7 @@ def test_sync_snapshots(self): @tc_banner_decorator def test_qos_vol(self): - test = getvolume_tester.TestQosVolume() + test = getvolume_tester.TestGetVolumeWithQos() test.run_test(self) @tc_banner_decorator @@ -410,6 +578,34 @@ def test_clone_vol(self): test = getvolume_tester.TestCloneVolume() test.run_test(self) + @tc_banner_decorator + def test_get_vol_with_get_qos_fails(self): + test = getvolume_tester.TestGetVolumeWithGetQoSFails() + test.run_test(self) + + @tc_banner_decorator + def test_get_rcg_vol(self): + test = getvolume_tester.TestGetRcgVolume() + test.run_test(self) + + @tc_banner_decorator + def test_get_rcg_vol_fails(self): + test = getvolume_tester.TestGetRcgVolumeFails() + test.run_test(self) + + """ + LIST VOLUMES related tests + """ + @tc_banner_decorator + def test_list_volumes(self): + test = listvolume_tester.TestListVolumeDefault() + test.run_test(self) + + @tc_banner_decorator + def test_list_no_volumes(self): + test = 
listvolume_tester.TestListNoVolumes() + test.run_test(self) + class HpeDockerISCSIUnitTests(HpeDockerUnitTestsBase, testtools.TestCase): @property @@ -543,6 +739,54 @@ def test_mount_snap_fc_host(self): test = mountvolume_tester.TestMountVolumeFCHost(is_snap=True) test.run_test(self) + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_rcg_normal(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'normal'} + test = mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_rcg_failover(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'failover'} + test = mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_rcg_recover(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'recover'} + test = mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_rcgs_ungettable(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'rcgs_not_gettable'} + test = mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_pri_rcg_gettable(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'only_primary_rcg_gettable'} + test = mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + + @tc_banner_decorator + def test_mount_ap_replicated_volume_fc_host_sec_rcg_gettable(self): + vol_params = {'vol_type': 'replicated', + 'rep_type': 'active-passive', + 'rcg_state': 'only_secondary_rcg_gettable'} + test = 
mountvolume_tester.TestMountVolumeFCHost(vol_params=vol_params) + test.run_test(self) + @tc_banner_decorator def test_mount_volume_fc_host_vlun_exists(self): test = mountvolume_tester.TestMountVolumeFCHostVLUNExists() @@ -552,3 +796,76 @@ def test_mount_volume_fc_host_vlun_exists(self): def test_mount_snap_fc_host_vlun_exists(self): test = mountvolume_tester.TestMountVolumeFCHostVLUNExists(is_snap=True) test.run_test(self) + + +# TODO: Unit tests for share need more work +# To be taken up after creating intial PR +class HpeDockerShareUnitTests(testtools.TestCase): + def _get_real_config_file(self): + return '/etc/hpedockerplugin/hpe.conf' + + def _get_test_config_file(self): + cfg_file_name = './test/config/hpe.conf' + return cfg_file_name + + def _get_configs(self, cfg_param): + host_config = setupcfg.get_host_config( + cfg_param, setupcfg.FILE_CONF) + host_config.set_override('ssh_hosts_key_file', + data.KNOWN_HOSTS_FILE) + backend_configs = setupcfg.get_all_backend_configs( + cfg_param, setupcfg.FILE_CONF, plugin_opts.hpe3par_file_opts) + return {'file': (host_config, backend_configs)} + + @property + def protocol(self): + return 'file' + + @tc_banner_decorator + def test_create_first_default_share(self): + test = createshare_tester.TestCreateFirstDefaultShare() + test.run_test(self) + + @tc_banner_decorator + def test_create_second_default_share(self): + test = createshare_tester.TestCreateSecondDefaultShare() + test.run_test(self) + + @tc_banner_decorator + def test_create_share_on_legacy_fpg(self): + test = createshare_tester.TestCreateShareOnLegacyFpg() + test.run_test(self) + + # TODO: TC to be enabled once tester class implementation is done + @tc_banner_decorator + def test_create_first_default_share_set_quota_fails(self): + test = createshare_tester.TestCreateFirstDefaultShareSetQuotaFails() + test.run_test(self) + + @tc_banner_decorator + def test_remove_regular_share(self): + del_regular_share = deleteshare_tester.TestDeleteShare.Regular() + test = 
deleteshare_tester.TestDeleteShare(del_regular_share) + test.run_test(self) + + @tc_banner_decorator + def test_remove_share_with_acl(self): + params = {'share_with_acl': True} + del_regular_share = deleteshare_tester.TestDeleteShare.Regular(params) + test = deleteshare_tester.TestDeleteShare(del_regular_share) + test.run_test(self) + + @tc_banner_decorator + def test_mount_share(self): + test = mountshare_tester.TestMountNfsShare() + test.run_test(self) + + @tc_banner_decorator + def test_mount_share_with_acl(self): + test = mountshare_tester.TestMountNfsShareWithAcl() + test.run_test(self) + + @tc_banner_decorator + def test_unmount_share(self): + test = unmountshare_tester.TestUnmountNfsShare() + test.run_test(self) diff --git a/test/unmountshare_tester.py b/test/unmountshare_tester.py new file mode 100644 index 00000000..a8fe02aa --- /dev/null +++ b/test/unmountshare_tester.py @@ -0,0 +1,70 @@ +import copy + +import test.fake_3par_data as data +import test.hpe_docker_unit_test as hpedockerunittest + + +class UnmountShareUnitTest(hpedockerunittest.HpeDockerUnitTestExecutor): + def __init__(self): + self._backend_name = None + self._share = copy.deepcopy(data.etcd_mounted_share) + + def _get_plugin_api(self): + return 'volumedriver_unmount' + + def get_request_params(self): + return {"Name": 'GoodShare', + "ID": "Fake-Mount-ID"} + + def setup_mock_objects(self): + def _setup_mock_3parclient(): + self.setup_mock_3parclient() + + def _setup_mock_etcd(): + mock_share_etcd = self.mock_objects['mock_share_etcd'] + mock_share_etcd.get_share.return_value = self._share + # Allow child class to make changes + self.setup_mock_etcd() + + # def _setup_mock_fileutil(): + # mock_fileutil = self.mock_objects['mock_fileutil'] + # mock_fileutil.mkdir_for_mounting.return_value = '/tmp' + # # Let the flow create filesystem + # mock_fileutil.has_filesystem.return_value = False + # # Allow child class to make changes + # self.setup_mock_fileutil() + _setup_mock_3parclient() + 
_setup_mock_etcd() + # _setup_mock_fileutil() + + def setup_mock_3parclient(self): + pass + + def setup_mock_etcd(self): + pass + + def setup_mock_fileutil(self): + pass + + +class TestUnmountNfsShare(UnmountShareUnitTest): + def __init__(self, **kwargs): + super(type(self), self).__init__(**kwargs) + + # def setup_mock_3parclient(self): + # mock_client = self.mock_objects['mock_3parclient'] + + def check_response(self, resp): + pass + # mnt_point = '/opt/hpe/data/hpedocker-GoodShare' + # dev_name = '192.168.98.41:/DockerFpg_2/DockerVfs_2/GoodShare' + # expected = { + # 'Mountpoint': mnt_point, + # 'Err': '', + # 'Name': 'GoodShare', + # 'Devicename': dev_name} + # expected_keys = ["Mountpoint", "Name", "Err", "Devicename"] + # for key in expected_keys: + # self._test_case.assertIn(key, resp) + # + # self._test_case.assertEqual(resp, expected) diff --git a/test/unmountvolume_tester.py b/test/unmountvolume_tester.py index 1cbdee79..631c1fb5 100644 --- a/test/unmountvolume_tester.py +++ b/test/unmountvolume_tester.py @@ -213,6 +213,52 @@ def check_response(self, resp): self._tc_run_cnt += 1 + +# This TC should carry out the cleanup steps +class TestUnmountVolNotOwnedByThisNode(UnmountVolumeUnitTest): + # This TC needs to be executed twice from outside and for each + # execution, the state of volume gets modified. 
Setting up + # the volume object to be used across two runs along with + # the run-count that is used to take decisions + def __init__(self, **kwargs): + super(type(self), self).__init__(**kwargs) + self._vol = copy.deepcopy(data.vol_mounted_on_other_node) + + def _setup_mock_3parclient(self): + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.queryHost.return_value = data.fake_hosts + # Returning more VLUNs + if not self._is_snap: + mock_3parclient.getHostVLUNs.side_effect = \ + [data.host_vluns, data.host_vluns] + else: + mock_3parclient.getHostVLUNs.side_effect = \ + [data.snap_host_vluns, data.snap_host_vluns] + + def _setup_mock_etcd(self): + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.get_vol_byname.return_value = self._vol + mock_etcd.get_vol_path_info.return_value = \ + {'path': '/dummy-path', + 'connection_info': {'data': 'dummy-conn-inf'}, + 'mount_dir': '/dummy-mnt-dir'} + + def check_response(self, resp): + self._test_case.assertEqual(resp, {u"Err": ''}) + + vol = self._vol + mock_etcd = self.mock_objects['mock_etcd'] + mock_etcd.save_vol.assert_called_with(vol) + self._test_case.assertIn('node_mount_info', + self._vol) + + mock_3parclient = self.mock_objects['mock_3parclient'] + mock_3parclient.getWsApiVersion.assert_called() + mock_3parclient.queryHost.assert_called() + mock_3parclient.getHostVLUNs.assert_called() + mock_3parclient.deleteVLUN.assert_called() + mock_3parclient.deleteHost.assert_called() + # # TODO: # class TestUnmountVolumeChapCredentialsNotFound(UnmountVolumeUnitTest): # pass diff --git a/tox.ini b/tox.ini index d5232944..65915930 100644 --- a/tox.ini +++ b/tox.ini @@ -9,7 +9,7 @@ PYTHONHASHSEED=0 usedevelop = True install_command = pip install {opts} {packages} -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py3.txt -r{toxinidir}/test-requirements.txt commands = python -m testtools.run {posargs}