+ export WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release
+ WORKSPACE=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release
+ [[ openshift-3.9-crio-release =~ openshift-.* ]]
+ [[ openshift-3.9-crio-release =~ .*-crio-.* ]]
+ export KUBEVIRT_PROVIDER=os-3.9.0-crio
+ KUBEVIRT_PROVIDER=os-3.9.0-crio
+ export KUBEVIRT_NUM_NODES=2
+ KUBEVIRT_NUM_NODES=2
+ export NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ NFS_WINDOWS_DIR=/home/nfs/images/windows2016
+ export NAMESPACE=kube-system
+ NAMESPACE=kube-system
+ trap '{ make cluster-down; }' EXIT
+ make cluster-down
./cluster/down.sh
+ make cluster-up
./cluster/up.sh
Downloading .......
Downloading .......
Downloading .......
2018/06/05 20:11:18 Waiting for host: 192.168.66.102:22
2018/06/05 20:11:21 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:29 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:37 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:11:42 Problem with dial: dial tcp 192.168.66.102:22: getsockopt: connection refused. Sleeping 5s
2018/06/05 20:11:47 Connected to tcp://192.168.66.102:22
+ systemctl stop origin-master-api
+ systemctl disable origin-master-api
Removed symlink /etc/systemd/system/multi-user.target.wants/origin-master-api.service.
Removed symlink /etc/systemd/system/origin-node.service.wants/origin-master-api.service.
+ systemctl stop origin-master-controllers
+ systemctl disable origin-master-controllers
Removed symlink /etc/systemd/system/multi-user.target.wants/origin-master-controllers.service.
2018/06/05 20:11:51 Waiting for host: 192.168.66.101:22
2018/06/05 20:11:54 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:12:02 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:12:10 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:12:18 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: no route to host. Sleeping 5s
2018/06/05 20:12:23 Problem with dial: dial tcp 192.168.66.101:22: getsockopt: connection refused. Sleeping 5s
2018/06/05 20:12:28 Connected to tcp://192.168.66.101:22
+ set +e
+ /usr/bin/oc get nodes
NAME      STATUS    ROLES     AGE       VERSION
node01    Ready     master    15d       v1.9.1+a0ce1bc657
+ '[' 0 -ne 0 ']'
+ set -e
+ inventory_file=/root/inventory
+ echo '[new_nodes]'
+ sed -i '/\[OSEv3:children\]/a new_nodes' /root/inventory
+ nodes_found=false
++ seq 2 100
+ for i in '$(seq 2 100)'
++ printf node%02d 2
+ node=node02
++ printf 192.168.66.1%02d 2
+ node_ip=192.168.66.102
+ set +e
+ ping 192.168.66.102 -c 1
PING 192.168.66.102 (192.168.66.102) 56(84) bytes of data.
64 bytes from 192.168.66.102: icmp_seq=1 ttl=64 time=1.17 ms

--- 192.168.66.102 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.170/1.170/1.170/0.000 ms
Found node02. Adding it to the inventory.
+ '[' 0 -ne 0 ']'
+ nodes_found=true
+ set -e
+ echo '192.168.66.102 node02'
+ echo 'Found node02. Adding it to the inventory.'
+ echo 'node02 openshift_node_labels="{'\''region'\'': '\''infra'\'','\''zone'\'': '\''default'\''}" openshift_schedulable=true openshift_ip=192.168.66.102'
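[Editor's note: the `set -x` trace above (and continuing below for node03) comes from a node-discovery loop. A condensed sketch of that loop, reconstructed from the trace; the redirection targets of the echo commands are not visible in the trace and are an assumption here:

  nodes_found=false
  for i in $(seq 2 100); do
    node=$(printf "node%02d" "$i")
    node_ip=$(printf "192.168.66.1%02d" "$i")
    set +e
    ping "$node_ip" -c 1
    if [ $? -ne 0 ]; then
      break                               # first unreachable address ends the scan
    fi
    nodes_found=true
    set -e
    echo "$node_ip $node" >> /etc/hosts   # assumption: destinations of these echos are not traced
    echo "Found $node. Adding it to the inventory."
    echo "$node openshift_node_labels=\"{'region': 'infra','zone': 'default'}\" openshift_schedulable=true openshift_ip=$node_ip" >> /root/inventory
  done
  # the trace's later "[ true = true ]" check guards the scaleup playbook on $nodes_found

The `[ $? -ne 0 ]` tests correspond to the `'[' 0 -ne 0 ']'` / `'[' 1 -ne 0 ']'` lines in the trace.]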
+ for i in '$(seq 2 100)'
++ printf node%02d 3
+ node=node03
++ printf 192.168.66.1%02d 3
+ node_ip=192.168.66.103
+ set +e
+ ping 192.168.66.103 -c 1
PING 192.168.66.103 (192.168.66.103) 56(84) bytes of data.
From 192.168.66.101 icmp_seq=1 Destination Host Unreachable

--- 192.168.66.103 ping statistics ---
1 packets transmitted, 0 received, +1 errors, 100% packet loss, time 0ms

+ '[' 1 -ne 0 ']'
+ break
+ '[' true = true ']'
+ ansible-playbook -i /root/inventory /usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml

PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate groups - Fail if no etcd hosts group is defined] ****************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Add master to oo_nodes_to_config] ****************************************
skipping: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)

PLAY [Ensure there are new_nodes] **********************************************
TASK [fail] ********************************************************************
skipping: [localhost]
TASK [fail] ********************************************************************
skipping: [localhost]
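[Editor's note: the scaleup playbook running above consumes /root/inventory, which the discovery loop extended. Only the `[new_nodes]` section and the `sed` edit are visible in the trace; the rest of this sketch is an assumption based on a stock openshift-ansible inventory layout:

  # Hypothetical /root/inventory after the discovery loop (sketch).
  cat > /root/inventory <<'EOF'
  [OSEv3:children]
  new_nodes
  masters
  nodes
  etcd

  [masters]
  node01 openshift_ip=192.168.66.101

  [etcd]
  node01

  [nodes]
  node01 openshift_node_labels="{'region': 'infra','zone': 'default'}" openshift_schedulable=true openshift_ip=192.168.66.101

  [new_nodes]
  node02 openshift_node_labels="{'region': 'infra','zone': 'default'}" openshift_schedulable=true openshift_ip=192.168.66.102
  EOF

The scaleup.yml entry point only acts on hosts in [new_nodes], which is why node01 stays untouched through most of the run below.]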
PLAY [Initialization Checkpoint Start] *****************************************
TASK [Set install initialization 'In Progress'] ********************************
ok: [node01]

PLAY [Populate config host groups] *********************************************
TASK [Load group name mapping variables] ***************************************
ok: [localhost]
TASK [Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
skipping: [localhost]
TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
skipping: [localhost]
TASK [Evaluate groups - g_lb_hosts required] ***********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts required] **********************************
skipping: [localhost]
TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
skipping: [localhost]
TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
skipping: [localhost]
TASK [Evaluate groups - Fail if no etcd hosts group is defined] ****************
skipping: [localhost]
TASK [Evaluate oo_all_hosts] ***************************************************
ok: [localhost] => (item=node01)
ok: [localhost] => (item=node02)
TASK [Evaluate oo_masters] *****************************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_master] ************************************************
ok: [localhost]
TASK [Evaluate oo_new_etcd_to_config] ******************************************
TASK [Evaluate oo_masters_to_config] *******************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_to_config] **********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_first_etcd] **************************************************
ok: [localhost]
TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_nodes_to_config] *********************************************
ok: [localhost] => (item=node02)
TASK [Add master to oo_nodes_to_config] ****************************************
skipping: [localhost] => (item=node01)
TASK [Evaluate oo_lb_to_config] ************************************************
TASK [Evaluate oo_nfs_to_config] ***********************************************
ok: [localhost] => (item=node01)
TASK [Evaluate oo_glusterfs_to_config] *****************************************
TASK [Evaluate oo_etcd_to_migrate] *********************************************
ok: [localhost] => (item=node01)
 [WARNING]: Could not match supplied host pattern, ignoring: oo_lb_to_config

PLAY [Ensure that all non-node hosts are accessible] ***************************
TASK [Gathering Facts] *********************************************************
ok: [node01]

PLAY [Initialize basic host facts] *********************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/deprecations.yml for node01, node02
TASK [openshift_sanitize_inventory : Check for usage of deprecated variables] ***
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : debug] ************************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_stats] ********************************
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Assign deprecated variables to correct counterparts] ***
included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml for node01, node02
included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/__deprecations_metrics.yml for node01, node02
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : conditional_set_fact] *********************
ok: [node01]
ok: [node02]
TASK [openshift_sanitize_inventory : Standardize on latest variable names] *****
ok: [node02]
ok: [node01]
TASK [openshift_sanitize_inventory : Normalize openshift_release] **************
skipping: [node02]
skipping: [node01]
TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : include_tasks] ****************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_sanitize_inventory/tasks/unsupported.yml for node01, node02
TASK [openshift_sanitize_inventory : Ensure that openshift_use_dnsmasq is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that openshift_node_dnsmasq_install_network_manager_hook is true] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : set_fact] *********************************
skipping: [node01] => (item=None)
skipping: [node02] => (item=None)
TASK [openshift_sanitize_inventory : Ensure that dynamic provisioning is set if using dynamic storage] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure the hosted registry's GlusterFS storage is configured correctly] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure clusterid is set along with the cloudprovider] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure ansible_service_broker_remove and ansible_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure template_service_broker_remove and template_service_broker_install are mutually exclusive] ***
skipping: [node01]
skipping: [node02]
TASK [openshift_sanitize_inventory : Ensure that all requires vsphere configuration variables are set] ***
skipping: [node01]
skipping: [node02]
TASK [Detecting Operating System from ostree_booted] ***************************
ok: [node02]
ok: [node01]
TASK [set openshift_deployment_type if unset] **********************************
skipping: [node01]
skipping: [node02]
TASK [initialize_facts set fact openshift_is_atomic and openshift_is_containerized] ***
ok: [node01]
ok: [node02]
TASK [Determine Atomic Host Docker Version] ************************************
skipping: [node01]
skipping: [node02]
TASK [assert atomic host docker version is 1.12 or later] **********************
skipping: [node01]
skipping: [node02]
PLAY [Initialize special first-master variables] *******************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [set_fact] ****************************************************************
ok: [node01]

PLAY [Disable web console if required] *****************************************
TASK [set_fact] ****************************************************************
skipping: [node01]

PLAY [Install packages necessary for installer] ********************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Ensure openshift-ansible installer package deps are installed] ***********
ok: [node02] => (item=iproute)
ok: [node02] => (item=dbus-python)
ok: [node02] => (item=PyYAML)
ok: [node02] => (item=python-ipaddress)
ok: [node02] => (item=yum-utils)
TASK [Ensure various deps for running system containers are installed] *********
skipping: [node02] => (item=atomic)
skipping: [node02] => (item=ostree)
skipping: [node02] => (item=runc)

PLAY [Initialize cluster facts] ************************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
ok: [node02]
TASK [Gather Cluster facts] ****************************************************
ok: [node01]
changed: [node02]
TASK [Set fact of no_proxy_internal_hostnames] *********************************
skipping: [node01]
skipping: [node02]
TASK [Initialize openshift.node.sdn_mtu] ***************************************
ok: [node01]
ok: [node02]

PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
skipping: [node01]
TASK [include_role] ************************************************************
skipping: [node01]
TASK [debug] *******************************************************************
skipping: [node01]

PLAY [Set openshift_version for etcd, node, and master hosts] ******************
skipping: no hosts matched

PLAY [Ensure the requested version packages are available.] ********************
skipping: no hosts matched

PLAY [Verify Requirements] *****************************************************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [Run variable sanity checks] **********************************************
ok: [node01]

PLAY [Initialization Checkpoint End] *******************************************
TASK [Set install initialization 'Complete'] ***********************************
ok: [node01]

PLAY [Validate node hostnames] *************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [Query DNS for IP address of node02] **************************************
ok: [node02]
TASK [Validate openshift_hostname when defined] ********************************
skipping: [node02]
TASK [Validate openshift_ip exists on node when defined] ***********************
skipping: [node02]
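[Editor's note: the "Validate node hostnames" play just above confirms node02's name resolves to the expected address before proceeding. A manual equivalent of that check, as a sketch only; the role's actual lookup mechanism is not shown in this log:

  # Sketch: verify node02 resolves to its inventory openshift_ip.
  expected_ip=192.168.66.102
  resolved_ip=$(getent ahostsv4 node02 | awk 'NR==1 {print $1}')
  if [ "$resolved_ip" != "$expected_ip" ]; then
    echo "node02 resolves to $resolved_ip, expected $expected_ip" >&2
    exit 1
  fi

A mismatch here is a common cause of node certificates being issued for the wrong address later in the run.]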
PLAY [Setup yum repositories for all hosts] ************************************
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Install Red Hat Subscription manager] *******************
skipping: [node02]
TASK [rhel_subscribe : Is host already registered?] ****************************
skipping: [node02]
TASK [rhel_subscribe : Register host] ******************************************
skipping: [node02]
TASK [rhel_subscribe : fail] ***************************************************
skipping: [node02]
TASK [rhel_subscribe : Determine if OpenShift Pool Already Attached] ***********
skipping: [node02]
TASK [rhel_subscribe : Attach to OpenShift Pool] *******************************
skipping: [node02]
TASK [rhel_subscribe : include_tasks] ******************************************
skipping: [node02]
TASK [openshift_repos : openshift_repos detect ostree] *************************
ok: [node02]
TASK [openshift_repos : Ensure libselinux-python is installed] *****************
ok: [node02]
TASK [openshift_repos : Remove openshift_additional.repo file] *****************
ok: [node02]
TASK [openshift_repos : Create any additional repos that are defined] **********
TASK [openshift_repos : include_tasks] *****************************************
skipping: [node02]
TASK [openshift_repos : include_tasks] *****************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_repos/tasks/centos_repos.yml for node02
TASK [openshift_repos : Configure origin gpg keys] *****************************
ok: [node02]
TASK [openshift_repos : Configure correct origin release repository] ***********
ok: [node02] => (item=/usr/share/ansible/openshift-ansible/roles/openshift_repos/templates/CentOS-OpenShift-Origin.repo.j2)
TASK [openshift_repos : Ensure clean repo cache in the event repos have been changed manually] ***
changed: [node02] => {
    "msg": "First run of openshift_repos"
}
TASK [openshift_repos : Record that openshift_repos already ran] ***************
ok: [node02]
RUNNING HANDLER [openshift_repos : refresh cache] ******************************
changed: [node02]

PLAY [Configure os_firewall] ***************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [os_firewall : Detecting Atomic Host Operating System] ********************
ok: [node02]
TASK [os_firewall : Set fact r_os_firewall_is_atomic] **************************
ok: [node02]
TASK [os_firewall : include_tasks] *********************************************
skipping: [node02]
TASK [os_firewall : include_tasks] *********************************************
included: /usr/share/ansible/openshift-ansible/roles/os_firewall/tasks/iptables.yml for node02
TASK [os_firewall : Ensure firewalld service is not enabled] *******************
ok: [node02]
TASK [os_firewall : Wait 10 seconds after disabling firewalld] *****************
skipping: [node02]
TASK [os_firewall : Install iptables packages] *********************************
ok: [node02] => (item=iptables)
ok: [node02] => (item=iptables-services)
TASK [os_firewall : Start and enable iptables service] *************************
ok: [node02 -> node02] => (item=node02)
TASK [os_firewall : need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail] ***
skipping: [node02]

PLAY [create oo_hosts_containerized_managed_true host group] *******************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [group_by] ****************************************************************
ok: [node01]

PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [container_runtime : Setup the docker-storage for overlay] ****************
skipping: [node02]
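[Editor's note: the os_firewall role above takes the iptables path (firewalld off, plain iptables on). A rough manual equivalent, as a sketch only; the role uses Ansible modules rather than these exact commands:

  # Sketch: what the os_firewall iptables path amounts to on node02.
  systemctl disable --now firewalld          # 'Ensure firewalld service is not enabled'
  yum install -y iptables iptables-services  # 'Install iptables packages'
  systemctl enable --now iptables            # 'Start and enable iptables service'

The "need to pause here" task exists because restarting iptables can momentarily drop the SSH session Ansible is using.]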
PLAY [create oo_hosts_containerized_managed_true host group] *******************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [group_by] ****************************************************************
ok: [node01]

PLAY [oo_nodes_to_config] ******************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_excluder : Install excluders] **********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02
TASK [openshift_excluder : Install docker excluder - yum] **********************
ok: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
ok: [node02]
TASK [openshift_excluder : Enable excluders] ***********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/pre.yml for node02
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Get current installed Docker version] ****************
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/docker_sanity.yml for node02
TASK [container_runtime : Error out if Docker pre-installed but too old] *******
skipping: [node02]
TASK [container_runtime : Error out if requested Docker is too old] ************
skipping: [node02]
TASK [container_runtime : Fail if Docker version requested but downgrade is required] ***
skipping: [node02]
TASK [container_runtime : Error out if attempting to upgrade Docker across the 1.10 boundary] ***
skipping: [node02]
TASK [container_runtime : Install Docker] **************************************
skipping: [node02]
TASK [container_runtime : Ensure docker.service.d directory exists] ************
ok: [node02]
TASK [container_runtime : Configure Docker service unit file] ******************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Set registry params] *********************************
skipping: [node02] => (item={u'reg_conf_var': u'ADD_REGISTRY', u'reg_flag': u'--add-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'BLOCK_REGISTRY', u'reg_flag': u'--block-registry', u'reg_fact_val': []})
skipping: [node02] => (item={u'reg_conf_var': u'INSECURE_REGISTRY', u'reg_flag': u'--insecure-registry', u'reg_fact_val': []})
TASK [container_runtime : Place additional/blocked/insecure registries in /etc/containers/registries.conf] ***
skipping: [node02]
TASK [container_runtime : Set Proxy Settings] **********************************
skipping: [node02] => (item={u'reg_conf_var': u'HTTP_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'HTTPS_PROXY', u'reg_fact_val': u''})
skipping: [node02] => (item={u'reg_conf_var': u'NO_PROXY', u'reg_fact_val': u''})
TASK [container_runtime : Set various Docker options] **************************
ok: [node02]
TASK [container_runtime : stat] ************************************************
ok: [node02]
TASK [container_runtime : Configure Docker Network OPTIONS] ********************
ok: [node02]
TASK [container_runtime : Detect if docker is already started] *****************
ok: [node02]
TASK [container_runtime : Start the Docker service] ****************************
ok: [node02]
TASK [container_runtime : set_fact] ********************************************
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/post.yml for node02
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
ok: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/registry_auth.yml for node02
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Fail quickly if openshift_docker_options are set] ****
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Install Docker so we can use the client] *************
skipping: [node02]
TASK [container_runtime : Disable Docker] **************************************
skipping: [node02]
TASK [container_runtime : Ensure proxies are in the atomic.conf] ***************
skipping: [node02]
TASK [container_runtime : debug] ***********************************************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Pre-pull Container Engine System Container image] ****
skipping: [node02]
TASK [container_runtime : Ensure container-engine.service.d directory exists] ***
skipping: [node02]
TASK [container_runtime : Ensure /etc/docker directory exists] *****************
skipping: [node02]
TASK [container_runtime : Install Container Engine System Container] ***********
skipping: [node02]
TASK [container_runtime : Configure Container Engine Service File] *************
skipping: [node02]
TASK [container_runtime : Configure Container Engine] **************************
skipping: [node02]
TASK [container_runtime : Start the Container Engine service] ******************
skipping: [node02]
TASK [container_runtime : set_fact] ********************************************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Check we are not using node as a Docker container with CRI-O] ***
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/pre.yml for node02
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]
TASK [container_runtime : Add enterprise registry, if necessary] ***************
skipping: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/syscontainer_packages.yml for node02
TASK [container_runtime : Ensure container-selinux is installed] ***************
ok: [node02]
TASK [container_runtime : Ensure atomic is installed] **************************
ok: [node02]
TASK [container_runtime : Ensure runc is installed] ****************************
ok: [node02]
TASK [container_runtime : Check that overlay is in the kernel] *****************
changed: [node02]
TASK [container_runtime : Add overlay to modprobe.d] ***************************
skipping: [node02]
TASK [container_runtime : Manually modprobe overlay into the kernel] ***********
skipping: [node02]
TASK [container_runtime : Enable and start systemd-modules-load] ***************
skipping: [node02]
TASK [container_runtime : Ensure proxies are in the atomic.conf] ***************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/atomic_proxy.yml for node02
TASK [container_runtime : Add http_proxy to /etc/atomic.conf] ******************
skipping: [node02]
TASK [container_runtime : Add https_proxy to /etc/atomic.conf] *****************
skipping: [node02]
TASK [container_runtime : Add no_proxy to /etc/atomic.conf] ********************
skipping: [node02]
TASK [container_runtime : debug] ***********************************************
ok: [node02] => {
    "l_crio_image": "docker.io/kubevirtci/crio:1.9.10"
}
TASK [container_runtime : Pre-pull CRI-O System Container image] ***************
ok: [node02]
TASK [container_runtime : Install CRI-O System Container] **********************
ok: [node02]
TASK [container_runtime : Remove CRI-O default configuration files] ************
ok: [node02] => (item=/etc/cni/net.d/200-loopback.conf)
ok: [node02] => (item=/etc/cni/net.d/100-crio-bridge.conf)
TASK [container_runtime : Create the CRI-O configuration] **********************
ok: [node02]
TASK [container_runtime : Ensure CNI configuration directory exists] ***********
ok: [node02]
TASK [container_runtime : Add iptables allow rules] ****************************
ok: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove iptables rules] *******************************
TASK [container_runtime : Add firewalld allow rules] ***************************
skipping: [node02] => (item={u'port': u'10010/tcp', u'service': u'crio'})
TASK [container_runtime : Remove firewalld allow rules] ************************
TASK [container_runtime : Configure the CNI network] ***************************
ok: [node02]
TASK [container_runtime : Create /etc/sysconfig/crio-storage] ******************
ok: [node02]
TASK [container_runtime : Create /etc/sysconfig/crio-network] ******************
ok: [node02]
TASK [container_runtime : Start the CRI-O service] *****************************
ok: [node02]
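[Editor's note: at this point the CRI-O system container (docker.io/kubevirtci/crio:1.9.10) is installed and its service started on node02. A quick manual sanity check, as a sketch; it assumes crictl is present and that the systemd unit is named cri-o, which the "Restart cri-o" task later in this log suggests:

  # Sketch: verify the CRI-O runtime is up on node02.
  systemctl status cri-o --no-pager
  crictl --runtime-endpoint unix:///var/run/crio/crio.sock info   # runtime + CNI status
  crictl --runtime-endpoint unix:///var/run/crio/crio.sock ps     # should be empty before kubelet starts

The port opened above (10010/tcp) is CRI-O's stream server, which the kubelet uses for exec/attach/port-forward.]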
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/common/post.yml for node02
TASK [container_runtime : Ensure /var/lib/containers exists] *******************
ok: [node02]
TASK [container_runtime : Fix SELinux Permissions on /var/lib/containers] ******
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/container_runtime/tasks/registry_auth.yml for node02
TASK [container_runtime : Check for credentials file for registry auth] ********
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth] *****
skipping: [node02]
TASK [container_runtime : Create credentials for docker cli registry auth (alternative)] ***
skipping: [node02]
TASK [container_runtime : stat the docker data dir] ****************************
ok: [node02]
TASK [container_runtime : include_tasks] ***************************************
skipping: [node02]

PLAY [Determine openshift_version to configure on first master] ****************
TASK [Gathering Facts] *********************************************************
ok: [node01]
TASK [include_role] ************************************************************
TASK [openshift_version : Use openshift.common.version fact as version to configure if already installed] ***
ok: [node01]
TASK [openshift_version : include_tasks] ***************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/first_master_rpm_version.yml for node01
TASK [openshift_version : Set rpm version to configure if openshift_pkg_version specified] ***
skipping: [node01]
TASK [openshift_version : Set openshift_version for rpm installation] **********
included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/check_available_rpms.yml for node01
TASK [openshift_version : Get available origin version] ************************
ok: [node01]
TASK [openshift_version : fail] ************************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01]
TASK [openshift_version : set_fact] ********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
skipping: [node01]
TASK [openshift_version : set_fact] ********************************************
skipping: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01]
TASK [openshift_version : debug] ***********************************************
ok: [node01]
TASK [debug] *******************************************************************
ok: [node01] => {
    "msg": "openshift_pkg_version set to -3.9.0"
}
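[Editor's note: openshift_pkg_version "-3.9.0" is the suffix openshift-ansible appends to package names when installing RPMs, which is why the node install later in this log pulls origin-node-3.9.0. A manual equivalent of the version check and pin, as a sketch; the role's "Get available origin version" task may use repoquery rather than yum list:

  # Sketch: inspect and pin the origin package version by hand.
  yum --showduplicates list available origin-node   # roughly what the version check inspects
  yum install -y origin-node-3.9.0                  # package name + openshift_pkg_version suffix

Pinning this way keeps the node RPMs aligned with openshift_image_tag v3.9.0 shown below.]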
PLAY [Set openshift_version for etcd, node, and master hosts] ******************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [set_fact] ****************************************************************
ok: [node02]

PLAY [Ensure the requested version packages are available.] ********************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [include_role] ************************************************************
TASK [openshift_version : Check openshift_version for rpm installation] ********
included: /usr/share/ansible/openshift-ansible/roles/openshift_version/tasks/check_available_rpms.yml for node02
TASK [openshift_version : Get available origin version] ************************
ok: [node02]
TASK [openshift_version : fail] ************************************************
skipping: [node02]
TASK [openshift_version : Fail if rpm version and docker image version are different] ***
skipping: [node02]
TASK [openshift_version : For an RPM install, abort when the release requested does not match the available version.] ***
skipping: [node02]
TASK [openshift_version : debug] ***********************************************
ok: [node02] => {
    "openshift_release": "VARIABLE IS NOT DEFINED!"
}
TASK [openshift_version : debug] ***********************************************
ok: [node02] => {
    "openshift_image_tag": "v3.9.0"
}
TASK [openshift_version : debug] ***********************************************
ok: [node02] => {
    "openshift_pkg_version": "-3.9.0"
}

PLAY [Node Install Checkpoint Start] *******************************************
TASK [Set Node install 'In Progress'] ******************************************
ok: [node01]

PLAY [Create OpenShift certificates for node hosts] ****************************
TASK [openshift_node_certificates : Ensure CA certificate exists on openshift_ca_host] ***
ok: [node02 -> node01]
TASK [openshift_node_certificates : fail] **************************************
skipping: [node02]
TASK [openshift_node_certificates : Check status of node certificates] *********
ok: [node02] => (item=system:node:node02.crt)
ok: [node02] => (item=system:node:node02.key)
ok: [node02] => (item=system:node:node02.kubeconfig)
ok: [node02] => (item=ca.crt)
ok: [node02] => (item=server.key)
ok: [node02] => (item=server.crt)
TASK [openshift_node_certificates : set_fact] **********************************
ok: [node02]
TASK [openshift_node_certificates : Create openshift_generated_configs_dir if it does not exist] ***
ok: [node02 -> node01]
TASK [openshift_node_certificates : find] **************************************
ok: [node02 -> node01]
TASK [openshift_node_certificates : Generate the node client config] ***********
changed: [node02 -> node01] => (item=node02)
TASK [openshift_node_certificates : Generate the node server certificate] ******
changed: [node02 -> node01] => (item=node02)
TASK [openshift_node_certificates : Create a tarball of the node config directories] ***
changed: [node02 -> node01]
TASK [openshift_node_certificates : Retrieve the node config tarballs from the master] ***
changed: [node02 -> node01]
TASK [openshift_node_certificates : Ensure certificate directory exists] *******
ok: [node02]
TASK [openshift_node_certificates : Unarchive the tarball on the node] *********
changed: [node02]
TASK [openshift_node_certificates : Delete local temp directory] ***************
ok: [node02 -> localhost]
TASK [openshift_node_certificates : Copy OpenShift CA to system CA trust] ******
ok: [node02] => (item={u'cert': u'/etc/origin/node/ca.crt', u'id': u'openshift'})
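[Editor's note: the node certificates are generated on node01 (the CA host), tarballed, and unpacked on node02 above. A manual inspection of what landed, as a sketch; the paths follow the /etc/origin/node layout referenced in this log, and the final kubeconfig check is a hypothetical extra step, not part of the role:

  # Sketch: inspect the node certificates unpacked on node02.
  openssl x509 -in /etc/origin/node/server.crt -noout -subject -enddate
  openssl x509 -in /etc/origin/node/ca.crt -noout -subject
  # Hypothetical: confirm the node kubeconfig authenticates against the master.
  oc --config='/etc/origin/node/system:node:node02.kubeconfig' whoami

The final task also adds ca.crt to the system CA trust so host tooling trusts the cluster CA.]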
PLAY [Disable excluders] *******************************************************
TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]
TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_docker_excluder": true
}
TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_openshift_excluder": true
}
TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]
TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]
TASK [openshift_excluder : Include main action task file] **********************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/disable.yml for node02
TASK [openshift_excluder : Include verify_upgrade.yml when upgrading] **********
skipping: [node02]
TASK [openshift_excluder : Disable excluders before the upgrade to remove older excluding expressions] ***
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/unexclude.yml for node02
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
TASK [openshift_excluder : Include install.yml] ********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02
TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]
TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]
TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]
TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]
TASK [openshift_excluder : Include exclude.yml] ********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]
TASK [openshift_excluder : Include unexclude.yml] ******************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/unexclude.yml for node02
TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]
TASK [openshift_excluder : disable docker excluder] ****************************
skipping: [node02]
TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]
TASK [openshift_excluder : disable openshift excluder] *************************
changed: [node02]
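[Editor's note: the excluders toggled above are small packages whose scripts add or remove yum exclude entries so that a stray `yum update` cannot upgrade docker or origin packages mid-install. Conceptually, and only as a sketch (the real origin-docker-excluder package ships its own exclude/unexclude script rather than editing yum.conf like this):

  # Sketch: what "enable"/"disable docker excluder" amount to.
  echo "exclude=docker*" >> /etc/yum.conf      # enable: yum now refuses docker upgrades
  sed -i '/^exclude=docker\*/d' /etc/yum.conf  # disable: lift the hold again

The run disables the excluders while it installs pinned packages, then re-enables them to lock the versions in.]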
PLAY [Evaluate node groups] ****************************************************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [Evaluate oo_containerized_master_nodes] **********************************
skipping: [localhost] => (item=node02)
 [WARNING]: Could not match supplied host pattern, ignoring: oo_containerized_master_nodes

PLAY [Configure containerized nodes] *******************************************
skipping: no hosts matched

PLAY [Configure nodes] *********************************************************
TASK [Gathering Facts] *********************************************************
ok: [node02]
TASK [openshift_clock : Determine if chrony is installed] **********************
 [WARNING]: Consider using yum, dnf or zypper module rather than running rpm
changed: [node02]
TASK [openshift_clock : Install ntp package] ***********************************
skipping: [node02]
TASK [openshift_clock : Start and enable ntpd/chronyd] *************************
changed: [node02]
TASK [openshift_cloud_provider : Set cloud provider facts] *********************
skipping: [node02]
TASK [openshift_cloud_provider : Create cloudprovider config dir] **************
skipping: [node02]
TASK [openshift_cloud_provider : include the defined cloud provider files] *****
skipping: [node02]
TASK [openshift_node : fail] ***************************************************
skipping: [node02]
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq_install.yml for node02
TASK [openshift_node : Check for NetworkManager service] ***********************
ok: [node02]
TASK [openshift_node : Set fact using_network_manager] *************************
ok: [node02]
TASK [openshift_node : Install dnsmasq] ****************************************
ok: [node02]
TASK [openshift_node : ensure origin/node directory exists] ********************
ok: [node02] => (item=/etc/origin)
changed: [node02] => (item=/etc/origin/node)
TASK [openshift_node : Install node-dnsmasq.conf] ******************************
ok: [node02]
TASK [openshift_node : include_tasks] ******************************************
skipping: [node02]
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq.yml for node02
TASK [openshift_node : Install dnsmasq configuration] **************************
ok: [node02]
TASK [openshift_node : Deploy additional dnsmasq.conf] *************************
skipping: [node02]
TASK [openshift_node : Enable dnsmasq] *****************************************
ok: [node02]
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/dnsmasq/network-manager.yml for node02
TASK [openshift_node : Install network manager dispatch script] ****************
ok: [node02]
TASK [openshift_node : Add iptables allow rules] *******************************
ok: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
ok: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
ok: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
ok: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove iptables rules] **********************************
TASK [openshift_node : Add firewalld allow rules] ******************************
skipping: [node02] => (item={u'port': u'10250/tcp', u'service': u'Kubernetes kubelet'})
skipping: [node02] => (item={u'port': u'80/tcp', u'service': u'http'})
skipping: [node02] => (item={u'port': u'443/tcp', u'service': u'https'})
skipping: [node02] => (item={u'cond': u'openshift_use_openshift_sdn | bool', u'port': u'4789/udp', u'service': u'OpenShift OVS sdn'})
skipping: [node02] => (item={u'cond': False, u'port': u'179/tcp', u'service': u'Calico BGP Port'})
skipping: [node02] => (item={u'cond': False, u'port': u'/tcp', u'service': u'Kubernetes service NodePort TCP'})
skipping: [node02] => (item={u'cond': False, u'port': u'/udp', u'service': u'Kubernetes service NodePort UDP'})
TASK [openshift_node : Remove firewalld allow rules] ***************************
TASK [openshift_node : Update journald config] *********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/journald.yml for node02
TASK [openshift_node : Checking for journald.conf] *****************************
ok: [node02]
TASK [openshift_node : Create journald persistence directories] ****************
ok: [node02]
TASK [openshift_node : Update journald setup] **********************************
ok: [node02] => (item={u'var': u'Storage', u'val': u'persistent'})
ok: [node02] => (item={u'var': u'Compress', u'val': True})
ok: [node02] => (item={u'var': u'SyncIntervalSec', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitInterval', u'val': u'1s'})
ok: [node02] => (item={u'var': u'RateLimitBurst', u'val': 10000})
ok: [node02] => (item={u'var': u'SystemMaxUse', u'val': u'8G'})
ok: [node02] => (item={u'var': u'SystemKeepFree', u'val': u'20%'})
ok: [node02] => (item={u'var': u'SystemMaxFileSize', u'val': u'10M'})
ok: [node02] => (item={u'var': u'MaxRetentionSec', u'val': u'1month'})
ok: [node02] => (item={u'var': u'MaxFileSec', u'val': u'1day'})
ok: [node02] => (item={u'var': u'ForwardToSyslog', u'val': False})
ok: [node02] => (item={u'var': u'ForwardToWall', u'val': False})
TASK [openshift_node : Restart journald] ***************************************
skipping: [node02]
TASK [openshift_node : Disable swap] *******************************************
ok: [node02]
TASK [openshift_node : include node installer] *********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/install.yml for node02
TASK [openshift_node : Install Node package, sdn-ovs, conntrack packages] ******
ok: [node02] => (item={u'name': u'origin-node-3.9.0'})
ok: [node02] => (item={u'name': u'origin-sdn-ovs-3.9.0', u'install': True})
ok: [node02] => (item={u'name': u'conntrack-tools'})
TASK [openshift_node : Pre-pull node image when containerized] *****************
skipping: [node02]
TASK [openshift_node : Restart cri-o] ******************************************
changed: [node02]
TASK [openshift_node : restart NetworkManager to ensure resolv.conf is present] ***
skipping: [node02]
TASK [openshift_node : sysctl] *************************************************
ok: [node02]
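[Editor's note: the journald items above are key=value settings the role applies to /etc/systemd/journald.conf. As a sketch of the resulting configuration (the role edits lines in place rather than appending a block like this):

  # Sketch: the journald settings written by 'Update journald setup' above.
  cat <<'EOF' >> /etc/systemd/journald.conf
  Storage=persistent
  Compress=yes
  SyncIntervalSec=1s
  RateLimitInterval=1s
  RateLimitBurst=10000
  SystemMaxUse=8G
  SystemKeepFree=20%
  SystemMaxFileSize=10M
  MaxRetentionSec=1month
  MaxFileSec=1day
  ForwardToSyslog=no
  ForwardToWall=no
  EOF
  systemctl restart systemd-journald   # the role's 'Restart journald' was skipped in this run

Persistent, rate-limited journals keep node logs available across reboots without letting them fill the disk.]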
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/registry_auth.yml for node02
TASK [openshift_node : Check for credentials file for registry auth] ***********
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth] *******************
skipping: [node02]
TASK [openshift_node : Create credentials for registry auth (alternative)] *****
skipping: [node02]
TASK [openshift_node : Setup ro mount of /root/.docker for containerized hosts] ***
skipping: [node02]
TASK [openshift_node : include standard node config] ***************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config.yml for node02
TASK [openshift_node : Install the systemd units] ******************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/systemd_units.yml for node02
TASK [openshift_node : Install Node service file] ******************************
ok: [node02]
TASK [openshift_node : include node deps docker service file] ******************
skipping: [node02]
TASK [openshift_node : include ovs service environment file] *******************
skipping: [node02]
TASK [openshift_node : include_tasks] ******************************************
skipping: [node02]
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/configure-node-settings.yml for node02
TASK [openshift_node : Configure Node settings] ********************************
ok: [node02] => (item={u'regex': u'^OPTIONS=', u'line': u'OPTIONS=--loglevel=2 '})
ok: [node02] => (item={u'regex': u'^CONFIG_FILE=', u'line': u'CONFIG_FILE=/etc/origin/node/node-config.yaml'})
ok: [node02] => (item={u'regex': u'^IMAGE_VERSION=', u'line': u'IMAGE_VERSION=v3.9.0'})
TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/configure-proxy-settings.yml for node02
TASK [openshift_node : Configure Proxy Settings] *******************************
skipping: [node02] => (item={u'regex': u'^HTTP_PROXY=', u'line': u'HTTP_PROXY='})
skipping: [node02] => (item={u'regex': u'^HTTPS_PROXY=', u'line': u'HTTPS_PROXY='})
skipping: [node02] => (item={u'regex': u'^NO_PROXY=', u'line': u'NO_PROXY=[],172.30.0.0/16,10.128.0.0/14'})
TASK [openshift_node : Pull container images] **********************************
skipping: [node02]
TASK [openshift_node : Start and enable openvswitch service] *******************
skipping: [node02]
TASK [openshift_node : set_fact] ***********************************************
ok: [node02]
TASK [openshift_node : file] ***************************************************
skipping: [node02]
TASK [openshift_node : Create the Node config] *********************************
changed: [node02]
TASK [openshift_node : Configure Node Environment Variables] *******************
TASK [openshift_node : Configure AWS Cloud Provider Settings] ******************
skipping: [node02] => (item=None)
skipping: [node02] => (item=None)
TASK [openshift_node : Wait for master API to become available before proceeding] ***
skipping: [node02]
TASK [openshift_node : Start and enable node dep] ******************************
skipping: [node02]
TASK [openshift_node : Start and enable node] **********************************
ok: [node02]
TASK [openshift_node : Dump logs from node service if it failed] ***************
skipping: [node02]
TASK [openshift_node : Abort if node failed to start] **************************
skipping: [node02]
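[Editor's note: origin-node is now started on node02 with CONFIG_FILE=/etc/origin/node/node-config.yaml and IMAGE_VERSION=v3.9.0. A sketch of confirming the new node registered, run from the master and mirroring the /usr/bin/oc get nodes call at the top of this log:

  # Sketch: confirm node02 joined the cluster (run on node01).
  /usr/bin/oc get nodes
  /usr/bin/oc get node node02 -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'   # expect "True"

This is the end-state the scaleup playbook is driving toward; the storage plugin tasks below only tune the node further.]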
TASK [openshift_node : set_fact] ***********************************************
ok: [node02]
TASK [openshift_node : NFS storage plugin configuration] ***********************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/nfs.yml for node02
TASK [openshift_node : Install NFS storage plugin dependencies] ****************
ok: [node02]
TASK [openshift_node : Check for existence of nfs sebooleans] ******************
ok: [node02] => (item=virt_use_nfs)
ok: [node02] => (item=virt_sandbox_use_nfs)
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:27.960721', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.031026', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:27.929695', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:29.349862', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.008874', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:29.340988', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow nfs storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:27.960721', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_use_nfs'], u'rc': 0, 'item': u'virt_use_nfs', u'delta': u'0:00:00.031026', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:27.929695', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:29.349862', '_ansible_no_log': False, u'stdout': u'virt_use_nfs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_nfs'], u'rc': 0, 'item': u'virt_sandbox_use_nfs', u'delta': u'0:00:00.008874', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_nfs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_nfs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:29.340988', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : GlusterFS storage plugin configuration] *****************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/glusterfs.yml for node02
TASK [openshift_node : Install GlusterFS storage plugin dependencies] **********
ok: [node02]
TASK [openshift_node : Check for existence of fusefs sebooleans] ***************
ok: [node02] => (item=virt_use_fusefs)
ok: [node02] => (item=virt_sandbox_use_fusefs)
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers] ***
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:36.905856', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.008751', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:36.897105', '_ansible_ignore_errors': None, 'failed': False})
ok: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:38.079933', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.012551', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:38.067382', '_ansible_ignore_errors': None, 'failed': False})
TASK [openshift_node : Set seboolean to allow gluster storage plugin access from containers (python 3)] ***
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:36.905856', '_ansible_no_log': False, u'stdout': u'virt_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_use_fusefs'], u'rc': 0, 'item': u'virt_use_fusefs', u'delta': u'0:00:00.008751', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:36.897105', '_ansible_ignore_errors': None, 'failed': False})
skipping: [node02] => (item={'_ansible_parsed': True, 'stderr_lines': [], '_ansible_item_result': True, u'end': u'2018-06-05 20:20:38.079933', '_ansible_no_log': False, u'stdout': u'virt_sandbox_use_fusefs --> on', u'cmd': [u'getsebool', u'virt_sandbox_use_fusefs'], u'rc': 0, 'item': u'virt_sandbox_use_fusefs', u'delta': u'0:00:00.012551', u'stderr': u'', u'changed': False, u'invocation': {u'module_args': {u'creates': None, u'executable': None, u'_uses_shell': False, u'_raw_params': u'getsebool virt_sandbox_use_fusefs', u'removes': None, u'warn': True, u'chdir': None, u'stdin': None}}, 'stdout_lines': [u'virt_sandbox_use_fusefs --> on'], 'failed_when_result': False, u'start': u'2018-06-05 20:20:38.067382', '_ansible_ignore_errors': None, 'failed': False})
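[Editor's note: the seboolean pattern above first runs getsebool per boolean, then only flips booleans that are off, which is why the "Set seboolean" items report ok or skipping here (all were already on). The manual equivalent, as a sketch:

  # Sketch: the seboolean check-and-set pattern used for NFS and GlusterFS above.
  for b in virt_use_nfs virt_sandbox_use_nfs virt_use_fusefs virt_sandbox_use_fusefs; do
    getsebool "$b"        # what the 'Check for existence of ... sebooleans' tasks run
    setsebool -P "$b" on  # persistently enable; effectively a no-op if already on
  done

These booleans let SELinux-confined containers (and sandboxed VMs, in KubeVirt's case) use NFS and FUSE-backed volumes.]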
TASK [openshift_node : Ceph storage plugin configuration] **********************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/ceph.yml for node02

TASK [openshift_node : Install Ceph storage plugin dependencies] ***************
ok: [node02]

TASK [openshift_node : iSCSI storage plugin configuration] *********************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/storage_plugins/iscsi.yml for node02

TASK [openshift_node : Install iSCSI storage plugin dependencies] **************
ok: [node02] => (item=iscsi-initiator-utils)
ok: [node02] => (item=device-mapper-multipath)

TASK [openshift_node : restart services] ***************************************
ok: [node02] => (item=multipathd)
ok: [node02] => (item=rpcbind)

TASK [openshift_node : Template multipath configuration] ***********************
changed: [node02]

TASK [openshift_node : Enable multipath] ***************************************
changed: [node02]

TASK [openshift_node : include_tasks] ******************************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_node/tasks/config/workaround-bz1331590-ovs-oom-fix.yml for node02

TASK [openshift_node : Create OpenvSwitch service.d directory] *****************
ok: [node02]

TASK [openshift_node : Install OpenvSwitch service OOM fix] ********************
ok: [node02]

TASK [tuned : Check for tuned package] *****************************************
ok: [node02]

TASK [tuned : Set tuned OpenShift variables] ***********************************
ok: [node02]

TASK [tuned : Ensure directory structure exists] *******************************
ok: [node02] => (item=openshift)
ok: [node02] => (item=openshift-control-plane)
ok: [node02] => (item=openshift-node)
skipping: [node02] => (item=recommend.conf)
skipping: [node02] => (item=openshift/tuned.conf)
skipping: [node02] => (item=openshift-control-plane/tuned.conf)
skipping: [node02] => (item=openshift-node/tuned.conf)

TASK [tuned : Ensure files are populated from templates] ***********************
skipping: [node02] => (item=openshift)
skipping: [node02] => (item=openshift-control-plane)
skipping: [node02] => (item=openshift-node)
ok: [node02] => (item=recommend.conf)
ok: [node02] => (item=openshift/tuned.conf)
ok: [node02] => (item=openshift-control-plane/tuned.conf)
ok: [node02] => (item=openshift-node/tuned.conf)

TASK [tuned : Make tuned use the recommended tuned profile on restart] *********
changed: [node02] => (item=/etc/tuned/active_profile)
changed: [node02] => (item=/etc/tuned/profile_mode)

TASK [tuned : Restart tuned service] *******************************************
changed: [node02]
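The two files the tuned role just changed control which profile the daemon loads. A rough by-hand equivalent, assuming tuned is installed; the role templates these files itself, and the exact profile_mode value it writes is an assumption here:

  #!/bin/bash
  # Record the recommended profile and restart tuned so it is
  # applied now and on subsequent boots.
  tuned-adm recommend > /etc/tuned/active_profile
  echo auto > /etc/tuned/profile_mode   # mode value assumed
  systemctl restart tuned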
TASK [nickhammond.logrotate : nickhammond.logrotate | Install logrotate] *******
ok: [node02]

TASK [nickhammond.logrotate : nickhammond.logrotate | Setup logrotate.d scripts] ***

RUNNING HANDLER [openshift_node : restart node] ********************************
changed: [node02]

PLAY [create additional node network plugin groups] ****************************

TASK [Gathering Facts] *********************************************************
ok: [node02]

TASK [group_by] ****************************************************************
ok: [node02]

TASK [group_by] ****************************************************************
ok: [node02]

TASK [group_by] ****************************************************************
ok: [node02]

TASK [group_by] ****************************************************************
ok: [node02]

TASK [group_by] ****************************************************************
ok: [node02]
 [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_flannel
 [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_calico
 [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_contiv
 [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_kuryr

PLAY [etcd_client node config] *************************************************
skipping: no hosts matched

PLAY [Additional node config] **************************************************
skipping: no hosts matched

PLAY [Additional node config] **************************************************
skipping: no hosts matched
 [WARNING]: Could not match supplied host pattern, ignoring: oo_nodes_use_nuage

PLAY [Additional node config] **************************************************
skipping: no hosts matched

PLAY [Configure Contiv masters] ************************************************

TASK [Gathering Facts] *********************************************************
ok: [node01]

PLAY [Configure rest of Contiv nodes] ******************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]
ok: [node01]

PLAY [Configure Kuryr node] ****************************************************
skipping: no hosts matched

PLAY [Additional node config] **************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]

TASK [openshift_manage_node : Wait for master API to become available before proceeding] ***
skipping: [node02]

TASK [openshift_manage_node : Wait for Node Registration] **********************
ok: [node02 -> node01]

TASK [openshift_manage_node : include_tasks] ***********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_manage_node/tasks/config.yml for node02

TASK [openshift_manage_node : Set node schedulability] *************************
ok: [node02 -> node01]

TASK [openshift_manage_node : Label nodes] *************************************
changed: [node02 -> node01]

TASK [Create group for deployment type] ****************************************
ok: [node02]

PLAY [Re-enable excluder if it was previously enabled] *************************

TASK [openshift_excluder : Detecting Atomic Host Operating System] *************
ok: [node02]

TASK [openshift_excluder : Debug r_openshift_excluder_enable_docker_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_docker_excluder": true
}

TASK [openshift_excluder : Debug r_openshift_excluder_enable_openshift_excluder] ***
ok: [node02] => {
    "r_openshift_excluder_enable_openshift_excluder": true
}

TASK [openshift_excluder : Fail if invalid openshift_excluder_action provided] ***
skipping: [node02]

TASK [openshift_excluder : Fail if r_openshift_excluder_upgrade_target is not defined] ***
skipping: [node02]

TASK [openshift_excluder : Include main action task file] **********************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/enable.yml for node02

TASK [openshift_excluder : Install excluders] **********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/install.yml for node02

TASK [openshift_excluder : Install docker excluder - yum] **********************
skipping: [node02]

TASK [openshift_excluder : Install docker excluder - dnf] **********************
skipping: [node02]

TASK [openshift_excluder : Install openshift excluder - yum] *******************
skipping: [node02]

TASK [openshift_excluder : Install openshift excluder - dnf] *******************
skipping: [node02]

TASK [openshift_excluder : set_fact] *******************************************
skipping: [node02]

TASK [openshift_excluder : Enable excluders] ***********************************
included: /usr/share/ansible/openshift-ansible/roles/openshift_excluder/tasks/exclude.yml for node02

TASK [openshift_excluder : Check for docker-excluder] **************************
ok: [node02]

TASK [openshift_excluder : Enable docker excluder] *****************************
changed: [node02]

TASK [openshift_excluder : Check for openshift excluder] ***********************
ok: [node02]

TASK [openshift_excluder : Enable openshift excluder] **************************
changed: [node02]

PLAY [Node Install Checkpoint End] *********************************************

TASK [Set Node install 'Complete'] *********************************************
ok: [node01]

PLAY RECAP *********************************************************************
localhost                  : ok=25   changed=0    unreachable=0    failed=0
node01                     : ok=42   changed=0    unreachable=0    failed=0
node02                     : ok=208  changed=28   unreachable=0    failed=0

INSTALLER STATUS ***************************************************************
Initialization             : Complete (0:01:14)
Node Install               : Complete (0:04:47)

+ set +e
+ crio=false
+ grep crio /root/inventory
openshift_use_crio=true
openshift_crio_systemcontainer_image_override=docker.io/kubevirtci/crio:1.9.10
+ '[' 0 -eq 0 ']'
+ crio=true
+ set -e
+ cat
+ ansible-playbook -i /root/inventory post_deployment_configuration --extra-vars=crio=true

PLAY [new_nodes] ***************************************************************

TASK [Gathering Facts] *********************************************************
ok: [node02]

TASK [Restart openvswitch service] *********************************************
changed: [node02]

PLAY [nodes, new_nodes] ********************************************************

TASK [Gathering Facts] *********************************************************
ok: [node01]
ok: [node02]

TASK [replace] *****************************************************************
changed: [node02]
changed: [node01]

TASK [replace] *****************************************************************
changed: [node01]
changed: [node02]

TASK [service] *****************************************************************
changed: [node01]
changed: [node02]

PLAY RECAP *********************************************************************
node01                     : ok=4    changed=3    unreachable=0    failed=0
node02                     : ok=6    changed=4    unreachable=0    failed=0

Sending file modes: C0755 217813128 oc
Sending file modes: C0600 5645 admin.kubeconfig
Cluster "node01:8443" set.
Cluster "node01:8443" set.
++ kubectl get nodes --no-headers
++ cluster/kubectl.sh get nodes --no-headers
++ grep -v Ready
+ '[' -n '' ']'
+ echo 'Nodes are ready:'
Nodes are ready:
+ kubectl get nodes
+ cluster/kubectl.sh get nodes
NAME      STATUS    ROLES     AGE       VERSION
node01    Ready     master    15d       v1.9.1+a0ce1bc657
node02    Ready               3m        v1.9.1+a0ce1bc657
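The readiness gate above greps the node list for anything that is not Ready; an empty result means the cluster is good to go. A minimal restatement of that check, assuming a working kubeconfig (the -w flag is an addition so that NotReady nodes are not accidentally filtered out along with Ready ones):

  #!/bin/bash
  # Report and fail if any node has not reached Ready yet.
  not_ready=$(kubectl get nodes --no-headers | grep -v -w Ready || true)
  if [ -n "$not_ready" ]; then
      echo "Waiting on nodes:" >&2
      echo "$not_ready" >&2
      exit 1
  fi
  echo 'Nodes are ready:'
  kubectl get nodes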
+ make cluster-sync
./cluster/build.sh
Building ...
sha256:bfa4d0e4a1a6ecc8067d4e64dfd286bfa9c51c74b3def97ee58a46f3832bc088
go version go1.10 linux/amd64
go version go1.10 linux/amd64
make[1]: Entering directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt'
hack/dockerized "./hack/check.sh && KUBEVIRT_VERSION= ./hack/build-go.sh install " && ./hack/build-copy-artifacts.sh
sha256:bfa4d0e4a1a6ecc8067d4e64dfd286bfa9c51c74b3def97ee58a46f3832bc088
go version go1.10 linux/amd64
go version go1.10 linux/amd64
Compiling tests...
compiled tests.test
hack/build-docker.sh build
Sending build context to Docker daemon 36.14 MB
Step 1/8 : FROM fedora:27 ---> 9110ae7f579f
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-controller ---> Using cache ---> 65d6d48cdb35
Step 4/8 : WORKDIR /home/virt-controller ---> Using cache ---> e1ade8663337
Step 5/8 : USER 1001 ---> Using cache ---> 2ce44d6f372a
Step 6/8 : COPY virt-controller /virt-controller ---> Using cache ---> 66a65308ed1c
Step 7/8 : ENTRYPOINT /virt-controller ---> Using cache ---> 8e0c0b9fe060
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-controller" '' ---> Running in b908e3ac6248 ---> b527e0bd36db
Removing intermediate container b908e3ac6248
Successfully built b527e0bd36db
Sending build context to Docker daemon 38.08 MB
Step 1/14 : FROM kubevirt/libvirt:3.7.0 ---> 60c80c8f7523
Step 2/14 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> d4ddb23dff45
Step 3/14 : RUN dnf -y install socat genisoimage util-linux libcgroup-tools ethtool sudo && dnf -y clean all && test $(id -u qemu) = 107 # make sure that the qemu user really is 107 ---> Using cache ---> 142a2ba860cf
Step 4/14 : COPY sock-connector /sock-connector ---> Using cache ---> 02569da61faa
Step 5/14 : COPY sh.sh /sh.sh ---> Using cache ---> 47d4a51575e2
Step 6/14 : COPY virt-launcher /virt-launcher ---> Using cache ---> b310aabc2a93
Step 7/14 : COPY kubevirt-sudo /etc/sudoers.d/kubevirt ---> Using cache ---> d43473f8500d
Step 8/14 : RUN chmod 0640 /etc/sudoers.d/kubevirt ---> Using cache ---> e875bd4fa754
Step 9/14 : RUN rm -f /libvirtd.sh ---> Using cache ---> 4929d6d0088d
Step 10/14 : COPY libvirtd.sh /libvirtd.sh ---> Using cache ---> b4519c145897
Step 11/14 : RUN chmod a+x /libvirtd.sh ---> Using cache ---> b0e9e7b9d267
Step 12/14 : COPY entrypoint.sh /entrypoint.sh ---> Using cache ---> 2feb367330ac
Step 13/14 : ENTRYPOINT /entrypoint.sh ---> Using cache ---> 2189a7beda85
Step 14/14 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-launcher" '' ---> Running in fcebe6d007f2 ---> f8f8af030af3
Removing intermediate container fcebe6d007f2
Successfully built f8f8af030af3
Sending build context to Docker daemon 36.7 MB
Step 1/5 : FROM fedora:27 ---> 9110ae7f579f
Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/5 : COPY virt-handler /virt-handler ---> Using cache ---> fac84076a253
Step 4/5 : ENTRYPOINT /virt-handler ---> Using cache ---> 2744863382df
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-handler" '' ---> Running in bf16d43297a8 ---> a901cb513b0c
Removing intermediate container bf16d43297a8
Successfully built a901cb513b0c
Sending build context to Docker daemon 36.86 MB
Step 1/8 : FROM fedora:27 ---> 9110ae7f579f
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virt-api ---> Using cache ---> 2eeb55f39191
Step 4/8 : WORKDIR /home/virt-api ---> Using cache ---> 56cea32a45d4
Step 5/8 : USER 1001 ---> Using cache ---> d121920c238b
Step 6/8 : COPY virt-api /virt-api ---> Using cache ---> 9735d1a7b4c2
Step 7/8 : ENTRYPOINT /virt-api ---> Using cache ---> 1d7838987285
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "virt-api" '' ---> Running in 4952bd202069 ---> f3969e577e38
Removing intermediate container 4952bd202069
Successfully built f3969e577e38
Sending build context to Docker daemon 6.656 kB
Step 1/10 : FROM fedora:27 ---> 9110ae7f579f
Step 2/10 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/10 : ENV container docker ---> Using cache ---> 32cab959eac8
Step 4/10 : RUN dnf -y install scsi-target-utils bzip2 e2fsprogs ---> Using cache ---> c2339817cfe0
Step 5/10 : RUN mkdir -p /images ---> Using cache ---> a19645b68794
Step 6/10 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /images/1-alpine.img ---> Using cache ---> 3f0fa7f50785
Step 7/10 : ADD run-tgt.sh / ---> Using cache ---> 35ac6b299ab7
Step 8/10 : EXPOSE 3260 ---> Using cache ---> 259db1618b21
Step 9/10 : CMD /run-tgt.sh ---> Using cache ---> 4c9f18dec05a
Step 10/10 : LABEL "iscsi-demo-target-tgtd" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" '' ---> Running in 2d2c21f9d211 ---> 824b28fb4338
Removing intermediate container 2d2c21f9d211
Successfully built 824b28fb4338
Sending build context to Docker daemon 2.56 kB
Step 1/5 : FROM fedora:27 ---> 9110ae7f579f
Step 2/5 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/5 : ENV container docker ---> Using cache ---> 32cab959eac8
Step 4/5 : RUN dnf -y install procps-ng nmap-ncat && dnf -y clean all ---> Using cache ---> 391fa00b27f9
Step 5/5 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "vm-killer" '' ---> Running in 4bab1bc63ef8 ---> 4953facab85f
Removing intermediate container 4bab1bc63ef8
Successfully built 4953facab85f
Sending build context to Docker daemon 5.12 kB
Step 1/7 : FROM debian:sid ---> bcec0ae8107e
Step 2/7 : MAINTAINER "David Vossel" \ ---> Using cache ---> 6696837acee7
Step 3/7 : ENV container docker ---> Using cache ---> 2dd2b1a02be6
Step 4/7 : RUN apt-get update && apt-get install -y bash curl bzip2 qemu-utils && mkdir -p /disk && rm -rf /var/lib/apt/lists/* ---> Using cache ---> dd3c4950b5c8
Step 5/7 : ADD entry-point.sh / ---> Using cache ---> d221e0eb5770
Step 6/7 : CMD /entry-point.sh ---> Using cache ---> 6506e61a9f41
Step 7/7 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "registry-disk-v1alpha" '' ---> Running in 18b118275da1 ---> 484a7f255e89
Removing intermediate container 18b118275da1
Successfully built 484a7f255e89
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32831/kubevirt/registry-disk-v1alpha:devel ---> 484a7f255e89
Step 2/4 : MAINTAINER "David Vossel" \ ---> Running in e73ab788a196 ---> c9e6f8f53b10
Removing intermediate container e73ab788a196
Step 3/4 : RUN curl https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img > /disk/cirros.img ---> Running in 5d3ddb1dd8dd
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 12.1M  100 12.1M    0     0  4059k      0  0:00:03  0:00:03 --:--:-- 4059k
 ---> b484821c191b
Removing intermediate container 5d3ddb1dd8dd
Step 4/4 : LABEL "cirros-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" '' ---> Running in bfffaae1e850 ---> f7508c51b93e
Removing intermediate container bfffaae1e850
Successfully built f7508c51b93e
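Every image in this run is stamped with the per-job LABEL shown in the final build step. One practical consequence (an illustration, not a script from the repo) is that the whole job's image set can be listed or removed with a label filter:

  #!/bin/bash
  # List, and optionally delete, every image built by this job.
  job_label=kubevirt-functional-tests-openshift-3.9-crio-release1
  docker images --filter "label=${job_label}" --format '{{.Repository}}:{{.Tag}}'
  # To remove them as well:
  # docker images --filter "label=${job_label}" -q | xargs -r docker rmi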
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32831/kubevirt/registry-disk-v1alpha:devel ---> 484a7f255e89
Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Running in 85fad2947a08 ---> bac2d62b7d3d
Removing intermediate container 85fad2947a08
Step 3/4 : RUN curl -g -L https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 > /disk/fedora.qcow2 ---> Running in caf732c351ec
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  221M  100  221M    0     0   780k      0  0:04:50  0:04:50 --:--:--  291k
 ---> bfcdf5d84624
Removing intermediate container caf732c351ec
Step 4/4 : LABEL "fedora-cloud-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" '' ---> Running in 2f9e69b970d5 ---> 7ed46316c47d
Removing intermediate container 2f9e69b970d5
Successfully built 7ed46316c47d
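Each demo image simply layers one downloaded disk onto the registry-disk-v1alpha base; the builds differ only in source URL and target path under /disk. Given how badly the Fedora mirror stalled above (minutes near zero throughput before recovering), a retry wrapper around the download step is a natural hardening; the fetch_disk helper below is hypothetical, with the curl flags taken from the build:

  #!/bin/bash
  # Fetch a cloud image into the path the registry-disk base image
  # serves from; --fail makes curl error on HTTP failures, and the
  # retry loop is an addition for flaky mirrors.
  fetch_disk() {
      local url=$1 dest=$2
      for attempt in 1 2 3; do
          curl -g -L --fail "$url" > "$dest" && return 0
          echo "download failed (attempt $attempt), retrying" >&2
      done
      return 1
  }
  fetch_disk https://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 /disk/fedora.qcow2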
Sending build context to Docker daemon 2.56 kB
Step 1/4 : FROM localhost:32831/kubevirt/registry-disk-v1alpha:devel ---> 484a7f255e89
Step 2/4 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> bac2d62b7d3d
Step 3/4 : RUN curl http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/x86_64/alpine-virt-3.7.0-x86_64.iso > /disk/alpine.iso ---> Running in 464c6da9445d
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 37.0M  100 37.0M    0     0  10.0M      0  0:00:03  0:00:03 --:--:-- 10.0M
 ---> c899b6820be0
Removing intermediate container 464c6da9445d
Step 4/4 : LABEL "alpine-registry-disk-demo" '' "kubevirt-functional-tests-openshift-3.9-crio-release1" '' ---> Running in 619ac41f0e8c ---> 6e927fec3050
Removing intermediate container 619ac41f0e8c
Successfully built 6e927fec3050
Sending build context to Docker daemon 33.97 MB
Step 1/8 : FROM fedora:27 ---> 9110ae7f579f
Step 2/8 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/8 : RUN useradd -u 1001 --create-home -s /bin/bash virtctl ---> Using cache ---> 6e6e1b7931e0
Step 4/8 : WORKDIR /home/virtctl ---> Using cache ---> 9d27e69a25f2
Step 5/8 : USER 1001 ---> Using cache ---> 1760a8e197af
Step 6/8 : COPY subresource-access-test /subresource-access-test ---> Using cache ---> 3e4da12fd3a5
Step 7/8 : ENTRYPOINT /subresource-access-test ---> Using cache ---> e6a433351cff
Step 8/8 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "subresource-access-test" '' ---> Running in 949929ced3f8 ---> 2caf6746e2d8
Removing intermediate container 949929ced3f8
Successfully built 2caf6746e2d8
Sending build context to Docker daemon 3.072 kB
Step 1/9 : FROM fedora:27 ---> 9110ae7f579f
Step 2/9 : MAINTAINER "The KubeVirt Project" ---> Using cache ---> dde0df1b6fe4
Step 3/9 : ENV container docker ---> Using cache ---> 32cab959eac8
Step 4/9 : RUN dnf -y install make git gcc && dnf -y clean all ---> Using cache ---> 8e034c77f534
Step 5/9 : ENV GIMME_GO_VERSION 1.9.2 ---> Using cache ---> 28ec1d482013
Step 6/9 : RUN mkdir -p /gimme && curl -sL https://raw.githubusercontent.com/travis-ci/gimme/master/gimme | HOME=/gimme bash >> /etc/profile.d/gimme.sh ---> Using cache ---> db78d0286f58
Step 7/9 : ENV GOPATH "/go" GOBIN "/usr/bin" ---> Using cache ---> 7ebe54e98be4
Step 8/9 : RUN mkdir -p /go && source /etc/profile.d/gimme.sh && go get github.com/masterzen/winrm-cli ---> Using cache ---> a3b04c1816f5
Step 9/9 : LABEL "kubevirt-functional-tests-openshift-3.9-crio-release1" '' "winrmcli" '' ---> Running in 8a43cc239842 ---> 38bbf09b7d9d
Removing intermediate container 8a43cc239842
Successfully built 38bbf09b7d9d
hack/build-docker.sh push
The push refers to a repository [localhost:32831/kubevirt/virt-controller]
ddf3037a595d: Preparing 52069b1f5033: Preparing 39bae602f753: Preparing 52069b1f5033: Pushed ddf3037a595d: Pushed 39bae602f753: Pushed
devel: digest: sha256:d4143077696498be26ddae67955614c1354790eaa4f76485ce67fd6deac081f4 size: 948
The push refers to a repository [localhost:32831/kubevirt/virt-launcher]
80456cf333b1: Preparing 827f571e028e: Preparing 827f571e028e: Preparing 8d8c95a99571: Preparing 32ad2d27cb6a: Preparing b7e0e35396a7: Preparing 06e0a78f7aa5: Preparing 4ebc38848be0: Preparing b9fd8c21001d: Preparing 4d2f0529ab56: Preparing 530cc55618cd: Preparing 34fa414dfdf6: Preparing 4ebc38848be0: Waiting 4d2f0529ab56: Waiting b9fd8c21001d: Waiting 06e0a78f7aa5: Waiting a1359dc556dd: Preparing 530cc55618cd: Waiting 34fa414dfdf6: Waiting 490c7c373332: Preparing 4b440db36f72: Preparing 39bae602f753: Preparing 39bae602f753: Waiting 4b440db36f72: Waiting 32ad2d27cb6a: Pushed 80456cf333b1: Pushed b7e0e35396a7: Pushed 827f571e028e: Pushed 8d8c95a99571: Pushed 4ebc38848be0: Pushed b9fd8c21001d: Pushed 530cc55618cd: Pushed 34fa414dfdf6: Pushed a1359dc556dd: Pushed 490c7c373332: Pushed 39bae602f753: Mounted from kubevirt/virt-controller 4d2f0529ab56: Pushed 06e0a78f7aa5: Pushed 4b440db36f72: Pushed
devel: digest: sha256:e64935e26dde64b994f939f9fc68cd1dce4ee7cc935a68a9b4bfa03d0515a4a1 size: 3653
The push refers to a repository [localhost:32831/kubevirt/virt-handler]
12564629c7ff: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-launcher 12564629c7ff: Pushed
devel: digest: sha256:ce9c07ae33af7b6458bcd271bf755f1926d84e87b518d2bdf0eeb3fdc3668859 size: 740
The push refers to a repository [localhost:32831/kubevirt/virt-api]
6dde516cdda2: Preparing 86b4b25303b4: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-handler 86b4b25303b4: Pushed 6dde516cdda2: Pushed
devel: digest: sha256:9bdf542fa9f1fb5f9b2e20f83bd1577bc6dcaf8b94b1a9dd210872d862cd5962 size: 948
The push refers to a repository [localhost:32831/kubevirt/iscsi-demo-target-tgtd]
80220be9fed7: Preparing 89fef61f2c06: Preparing b18a27986676: Preparing db8a56c06e31: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/virt-api 80220be9fed7: Pushed b18a27986676: Pushed 89fef61f2c06: Pushed db8a56c06e31: Pushed
devel: digest: sha256:24df4dea89c17351e47f5fe8de458ec2fab4c5af1303c731edf4615b09fd44ba size: 1368
The push refers to a repository [localhost:32831/kubevirt/vm-killer]
040d3361950b: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/iscsi-demo-target-tgtd 040d3361950b: Pushed
devel: digest: sha256:7b605ca112e614214f8e69cc3695c13bccce26b010ebbf603494a5a537ae4e77 size: 740
The push refers to a repository [localhost:32831/kubevirt/registry-disk-v1alpha]
4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 4cd98e29acca: Pushed 9beeb9a18439: Pushed 6709b2da72b8: Pushed
devel: digest: sha256:b7afce1582d020ce79004d417dfbc14d37ad6df41de3fa8d80a5d3173b226ad6 size: 948
The push refers to a repository [localhost:32831/kubevirt/cirros-registry-disk-demo]
9b3a0ca9b630: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 6709b2da72b8: Mounted from kubevirt/registry-disk-v1alpha 4cd98e29acca: Mounted from kubevirt/registry-disk-v1alpha 9beeb9a18439: Mounted from kubevirt/registry-disk-v1alpha 9b3a0ca9b630: Pushed
devel: digest: sha256:077195c4b3a68cd3be2cff820c4a813bf0b6691044a3559e0bf3596ff4f1200c size: 1160
The push refers to a repository [localhost:32831/kubevirt/fedora-cloud-registry-disk-demo]
33f93c8db2b7: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 9beeb9a18439: Mounted from kubevirt/cirros-registry-disk-demo 4cd98e29acca: Mounted from kubevirt/cirros-registry-disk-demo 6709b2da72b8: Mounted from kubevirt/cirros-registry-disk-demo 33f93c8db2b7: Pushed
devel: digest: sha256:5c4d226f5b80dff0fbc0fda5c2644e72ac68d380610fd2fa11780b9ae8169bef size: 1161
The push refers to a repository [localhost:32831/kubevirt/alpine-registry-disk-demo]
ce30bea5fe9b: Preparing 4cd98e29acca: Preparing 9beeb9a18439: Preparing 6709b2da72b8: Preparing 4cd98e29acca: Mounted from kubevirt/fedora-cloud-registry-disk-demo 6709b2da72b8: Mounted from kubevirt/fedora-cloud-registry-disk-demo 9beeb9a18439: Mounted from kubevirt/fedora-cloud-registry-disk-demo ce30bea5fe9b: Pushed
devel: digest: sha256:883712024921e92e8a1fa696ffeaaf5ff6c65ae2f0ffefe1bb166a806670a9ea size: 1160
The push refers to a repository [localhost:32831/kubevirt/subresource-access-test]
3843f07abec5: Preparing 2c4f6b64d5e3: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/vm-killer 2c4f6b64d5e3: Pushed 3843f07abec5: Pushed
devel: digest: sha256:bf33fac6259ee0bf1aaf21b127c6077c12c18078809663375f2b4bf115824fe4 size: 948
The push refers to a repository [localhost:32831/kubevirt/winrmcli]
161ef5381259: Preparing 2bef46eb5bf3: Preparing ac5611d25ed9: Preparing 39bae602f753: Preparing 39bae602f753: Mounted from kubevirt/subresource-access-test 161ef5381259: Pushed ac5611d25ed9: Pushed 2bef46eb5bf3: Pushed
devel: digest: sha256:19516495f580610f8ef0f4a0abee0a19cd01d350b258da4b64220b828393406c size: 1165
make[1]: Leaving directory `/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt'
Done
./cluster/clean.sh
+ source hack/common.sh
++++ dirname 'hack/common.sh[0]'
+++ cd hack/../
+++ pwd
++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt
++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out
++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/vendor
++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/cmd
++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/tests
++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/apidocs
++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests
++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/client-python
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ KUBEVIRT_NUM_NODES=2
++ KUBEVIRT_NUM_NODES=2
++ '[' -z kubevirt-functional-tests-openshift-3.9-crio-release ']'
++ provider_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1
++ job_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1
+++ kubevirt_version
+++ '[' -n '' ']'
+++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/.git ']'
++++ git describe --always --tags
+++ echo v0.5.1-alpha.2-46-g174936a
++ KUBEVIRT_VERSION=v0.5.1-alpha.2-46-g174936a
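The version stamp above is derived from git metadata whenever no explicit version is provided. A condensed sketch of the detection logic visible in the trace; the final "latest" fallback is an assumption, not shown in this log:

  #!/bin/bash
  # Prefer an explicit version, else derive one from git tags,
  # mirroring the kubevirt_version trace in hack/common.sh.
  kubevirt_version() {
      if [ -n "${KUBEVIRT_VERSION}" ]; then
          echo "${KUBEVIRT_VERSION}"
      elif [ -d "${KUBEVIRT_DIR}/.git" ]; then
          git -C "${KUBEVIRT_DIR}" describe --always --tags
      else
          echo latest   # assumed fallback
      fi
  }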
master_ip=192.168.200.2 +++ network_provider=flannel +++ kubeconfig=cluster/vagrant/.kubeconfig +++ namespace=kube-system ++ test -f hack/config-provider-os-3.9.0-crio.sh ++ source hack/config-provider-os-3.9.0-crio.sh +++ master_ip=127.0.0.1 +++ docker_tag=devel +++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubeconfig +++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubectl +++ docker_prefix=localhost:32831/kubevirt +++ manifest_docker_prefix=registry:5000/kubevirt ++ test -f hack/config-local.sh ++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace + echo 'Cleaning up ...' Cleaning up ... + cluster/kubectl.sh get vms --all-namespaces -o=custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,FINALIZERS:.metadata.finalizers --no-headers + grep foregroundDeleteVirtualMachine + read p the server doesn't have a resource type "vms" + _kubectl delete ds -l kubevirt.io -n kube-system --cascade=false --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=libvirt --force --grace-period 0 No resources found + _kubectl delete pods -n kube-system -l=kubevirt.io=virt-handler --force --grace-period 0 No resources found + namespaces=(default ${namespace}) + for i in '${namespaces[@]}' + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete deployment -l kubevirt.io No resources found + _kubectl -n default delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete rs -l kubevirt.io No resources found + _kubectl -n default delete services -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete services -l kubevirt.io No resources found + _kubectl -n default delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete apiservices -l kubevirt.io No resources found + _kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete validatingwebhookconfiguration -l kubevirt.io No resources found + _kubectl -n default delete secrets -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete secrets -l kubevirt.io No resources found + _kubectl -n default delete pv -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pv -l kubevirt.io No resources found + _kubectl -n default delete pvc -l 
kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pvc -l kubevirt.io No resources found + _kubectl -n default delete ds -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete ds -l kubevirt.io No resources found + _kubectl -n default delete customresourcedefinitions -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete customresourcedefinitions -l kubevirt.io No resources found + _kubectl -n default delete pods -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete pods -l kubevirt.io No resources found + _kubectl -n default delete clusterrolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete clusterrolebinding -l kubevirt.io No resources found + _kubectl -n default delete rolebinding -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete rolebinding -l kubevirt.io No resources found + _kubectl -n default delete roles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete roles -l kubevirt.io No resources found + _kubectl -n default delete clusterroles -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete clusterroles -l kubevirt.io No resources found + _kubectl -n default delete serviceaccounts -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n default delete serviceaccounts -l kubevirt.io No resources found ++ _kubectl -n default get crd offlinevirtualmachines.kubevirt.io ++ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ wc -l ++ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig ++ cluster/os-3.9.0-crio/.kubectl -n default get crd offlinevirtualmachines.kubevirt.io Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found + '[' 0 -gt 0 ']' + for i in '${namespaces[@]}' + _kubectl -n kube-system delete apiservices -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete apiservices -l kubevirt.io No resources found + _kubectl -n kube-system delete deployment -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete deployment -l kubevirt.io No resources found + _kubectl -n kube-system delete rs -l kubevirt.io + export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig + cluster/os-3.9.0-crio/.kubectl -n kube-system delete rs -l kubevirt.io No resources found + _kubectl -n kube-system delete services -l kubevirt.io + export 
+ for i in '${namespaces[@]}'
+ _kubectl -n kube-system delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete deployment -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete deployment -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete rs -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete rs -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete services -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete services -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete apiservices -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete apiservices -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete validatingwebhookconfiguration -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete secrets -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete secrets -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pv -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete pv -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pvc -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete pvc -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete ds -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete ds -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete customresourcedefinitions -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete pods -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete pods -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete clusterrolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete clusterrolebinding -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete rolebinding -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete rolebinding -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete roles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete roles -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete clusterroles -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete clusterroles -l kubevirt.io
No resources found
+ _kubectl -n kube-system delete serviceaccounts -l kubevirt.io
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl -n kube-system delete serviceaccounts -l kubevirt.io
No resources found
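Note that several of the kinds swept above (pv, validatingwebhookconfiguration, clusterrolebinding, clusterroles, and the CRDs themselves) are cluster-scoped, so the -n default / -n kube-system flag is accepted but has no effect on them, and apiservices appears twice per pass; the sweep is harmlessly redundant rather than wrong. For example:

    # Cluster-scoped kinds ignore the namespace flag; these two are equivalent.
    _kubectl -n kube-system delete clusterroles -l kubevirt.io
    _kubectl delete clusterroles -l kubevirt.io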
++ _kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io
++ wc -l
++ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
++ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
++ cluster/os-3.9.0-crio/.kubectl -n kube-system get crd offlinevirtualmachines.kubevirt.io
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "offlinevirtualmachines.kubevirt.io" not found
+ '[' 0 -gt 0 ']'
+ sleep 2
+ echo Done
Done
./cluster/deploy.sh
+ source hack/common.sh
++++ dirname 'hack/common.sh[0]'
+++ cd hack/../
+++ pwd
++ KUBEVIRT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt
++ OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out
++ VENDOR_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/vendor
++ CMD_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/cmd
++ TESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/tests
++ APIDOCS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/apidocs
++ MANIFESTS_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests
++ PYTHON_CLIENT_OUT_DIR=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/client-python
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ KUBEVIRT_NUM_NODES=2
++ KUBEVIRT_NUM_NODES=2
++ '[' -z kubevirt-functional-tests-openshift-3.9-crio-release ']'
++ provider_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1
++ job_prefix=kubevirt-functional-tests-openshift-3.9-crio-release1
+++ kubevirt_version
+++ '[' -n '' ']'
+++ '[' -d /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/.git ']'
++++ git describe --always --tags
+++ echo v0.5.1-alpha.2-46-g174936a
++ KUBEVIRT_VERSION=v0.5.1-alpha.2-46-g174936a
+ source cluster/os-3.9.0-crio/provider.sh
++ set -e
++ source cluster/os-3.9.0/provider.sh
+++ set -e
+++ image=os-3.9.0@sha256:234b3ae5c335c9fa32fa3bc01d5833f8f4d45420d82a8f8b12adc02687eb88b1
+++ source cluster/ephemeral-provider-common.sh
++++ set -e
++++ _cli='docker run --privileged --net=host --rm -v /var/run/docker.sock:/var/run/docker.sock kubevirtci/gocli@sha256:aa7f295a7908fa333ab5e98ef3af0bfafbabfd3cee2b83f9af47f722e3000f6a'
++ image=os-3.9.0-crio@sha256:107d03dad4da6957e28774b121a45e177f31d7b4ad43c6eab7b24d467e59e213
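The kubevirt_version helper traced above prefers an explicit version, then falls back to git describe inside a git checkout; note also that both provider images and the gocli helper are pinned by sha256 digest rather than by tag. A minimal sketch matching the traced branch order (the final fallback is not exercised in this run and is an assumption):

    # Mirrors the traced checks: '-n' on an override variable, then '-d .git'.
    kubevirt_version() {
        if [ -n "${KUBEVIRT_VERSION}" ]; then
            echo "${KUBEVIRT_VERSION}"
        elif [ -d "${KUBEVIRT_DIR}/.git" ]; then
            # Yields e.g. v0.5.1-alpha.2-46-g174936a: last tag, commits since, short hash.
            git describe --always --tags
        else
            echo "undefined"   # assumption: fallback not visible in this log
        fi
    }
    KUBEVIRT_VERSION="$(kubevirt_version)"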
+ source hack/config.sh
++ unset binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig manifest_docker_prefix namespace
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ KUBEVIRT_PROVIDER=os-3.9.0-crio
++ source hack/config-default.sh source hack/config-os-3.9.0-crio.sh
+++ binaries='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virtctl cmd/fake-qemu-process cmd/virt-api cmd/subresource-access-test'
+++ docker_images='cmd/virt-controller cmd/virt-launcher cmd/virt-handler cmd/virt-api images/iscsi-demo-target-tgtd images/vm-killer cmd/registry-disk-v1alpha images/cirros-registry-disk-demo images/fedora-cloud-registry-disk-demo images/alpine-registry-disk-demo cmd/subresource-access-test images/winrmcli'
+++ docker_prefix=kubevirt
+++ docker_tag=latest
+++ master_ip=192.168.200.2
+++ network_provider=flannel
+++ kubeconfig=cluster/vagrant/.kubeconfig
+++ namespace=kube-system
++ test -f hack/config-provider-os-3.9.0-crio.sh
++ source hack/config-provider-os-3.9.0-crio.sh
+++ master_ip=127.0.0.1
+++ docker_tag=devel
+++ kubeconfig=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubeconfig
+++ kubectl=/var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/cluster/os-3.9.0-crio/.kubectl
+++ docker_prefix=localhost:32831/kubevirt
+++ manifest_docker_prefix=registry:5000/kubevirt
++ test -f hack/config-local.sh
++ export binaries docker_images docker_prefix docker_tag manifest_templates master_ip network_provider kubeconfig namespace
+ echo 'Deploying ...'
Deploying ...
+ [[ -z openshift-3.9-crio-release ]]
+ [[ openshift-3.9-crio-release =~ .*-dev ]]
+ [[ openshift-3.9-crio-release =~ .*-release ]]
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/demo-content.yaml =~ .*demo.* ]]
+ continue
+ for manifest in '${MANIFESTS_OUT_DIR}/release/*'
+ [[ /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml =~ .*demo.* ]]
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/release/kubevirt.yaml
clusterrole "kubevirt.io:admin" created
clusterrole "kubevirt.io:edit" created
clusterrole "kubevirt.io:view" created
serviceaccount "kubevirt-apiserver" created
clusterrolebinding "kubevirt-apiserver" created
clusterrolebinding "kubevirt-apiserver-auth-delegator" created
rolebinding "kubevirt-apiserver" created
role "kubevirt-apiserver" created
clusterrole "kubevirt-apiserver" created
clusterrole "kubevirt-controller" created
serviceaccount "kubevirt-controller" created
serviceaccount "kubevirt-privileged" created
clusterrolebinding "kubevirt-controller" created
clusterrolebinding "kubevirt-controller-cluster-admin" created
clusterrolebinding "kubevirt-privileged-cluster-admin" created
clusterrole "kubevirt.io:default" created
clusterrolebinding "kubevirt.io:default" created
service "virt-api" created
deployment "virt-api" created
deployment "virt-controller" created
daemonset "virt-handler" created
customresourcedefinition "virtualmachines.kubevirt.io" created
customresourcedefinition "virtualmachinereplicasets.kubevirt.io" created
customresourcedefinition "virtualmachinepresets.kubevirt.io" created
customresourcedefinition "offlinevirtualmachines.kubevirt.io" created
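The deploy step iterates over the generated release manifests, skips anything matching demo, and applies the rest with create -f. A sketch of the loop implied by the trace (variable names taken from the traced environment):

    # Apply every release manifest except demo content, then the testing
    # manifests recursively (-R), as the next trace block does.
    for manifest in "${MANIFESTS_OUT_DIR}"/release/*; do
        [[ $manifest =~ .*demo.* ]] && continue
        _kubectl create -f "$manifest"
    done
    _kubectl create -f "${MANIFESTS_OUT_DIR}/testing" -R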
+ _kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl create -f /var/lib/swarm/workspace/kubevirt-functional-tests-openshift-3.9-crio-release/go/src/kubevirt.io/kubevirt/_out/manifests/testing -R
persistentvolumeclaim "disk-alpine" created
persistentvolume "iscsi-disk-alpine" created
persistentvolumeclaim "disk-custom" created
persistentvolume "iscsi-disk-custom" created
daemonset "iscsi-demo-target-tgtd" created
serviceaccount "kubevirt-testing" created
clusterrolebinding "kubevirt-testing-cluster-admin" created
+ [[ os-3.9.0-crio =~ os-3.9.0.* ]]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-controller -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-controller"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-testing -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-testing"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-privileged -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-privileged"]
+ _kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged -z kubevirt-apiserver -n kube-system
scc "privileged" added to: ["system:serviceaccount:kube-system:kubevirt-apiserver"]
+ _kubectl adm policy add-scc-to-user privileged admin
+ export KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ KUBECONFIG=cluster/os-3.9.0-crio/.kubeconfig
+ cluster/os-3.9.0-crio/.kubectl adm policy add-scc-to-user privileged admin
scc "privileged" added to: ["admin"]
+ echo Done
Done
++ grep -v Running
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
+ '[' -n 'iscsi-demo-target-tgtd-747gw 0/1 ContainerCreating 0 28s
iscsi-demo-target-tgtd-dnwc7 0/1 ContainerCreating 0 28s
virt-api-fd96f94b5-flxxr 0/1 ContainerCreating 0 32s
virt-api-fd96f94b5-t4xff 0/1 ContainerCreating 0 32s
virt-controller-5f7c946cc4-5smd9 0/1 ContainerCreating 0 32s
virt-controller-5f7c946cc4-mwx8s 0/1 ContainerCreating 0 32s
virt-handler-hvpzb 0/1 ContainerCreating 0 26s
virt-handler-s5d2r 0/1 ContainerCreating 0 26s' ']'
+ echo 'Waiting for kubevirt pods to enter the Running state ...'
Waiting for kubevirt pods to enter the Running state ...
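On OpenShift the KubeVirt service accounts must be allowed to run privileged pods, hence the add-scc-to-user calls above. The wait that follows keeps polling until no pod in kube-system reports a non-Running state; the sleep interval is visible in the trace, the loop bound is not, and the `+ true` after a failed poll suggests errors are tolerated. A sketch of the loop, under those assumptions:

    # Re-check every 10s until `get pods` reports nothing outside Running.
    # API-server errors are swallowed (|| true) and simply trigger another round.
    while [ -n "$(cluster/kubectl.sh get pods -n kube-system --no-headers \
                  | grep -v Running || true)" ]; do
        echo 'Waiting for kubevirt pods to enter the Running state ...'
        sleep 10
    done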
+ kubectl get pods -n kube-system --no-headers
+ grep -v Running
+ cluster/kubectl.sh get pods -n kube-system --no-headers
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods)
+ true
+ sleep 10
++ kubectl get pods -n kube-system --no-headers
++ cluster/kubectl.sh get pods -n kube-system --no-headers
++ grep -v Running
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods)
+ '[' -n '' ']'
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '!/virt-controller/ && /false/'
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
Unable to connect to the server: unexpected EOF
+ '[' -n '' ']'
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '/virt-controller/ && /true/'
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ wc -l
Unable to connect to the server: read tcp 127.0.0.1:58384->127.0.0.1:32828: read: connection reset by peer
+ '[' 0 -lt 1 ']'
+ echo 'Waiting for KubeVirt virt-controller container to become ready ...'
Waiting for KubeVirt virt-controller container to become ready ...
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ awk '/virt-controller/ && /true/'
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ wc -l
Unable to connect to the server: read tcp 127.0.0.1:58388->127.0.0.1:32828: read: connection reset by peer
0
+ sleep 10
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '/virt-controller/ && /true/'
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ wc -l
Unable to connect to the server: net/http: TLS handshake timeout
+ '[' 0 -lt 1 ']'
+ echo 'Waiting for KubeVirt virt-controller container to become ready ...'
Waiting for KubeVirt virt-controller container to become ready ...
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ awk '/virt-controller/ && /true/'
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ wc -l
0
+ sleep 10
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '/virt-controller/ && /true/'
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ wc -l
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods)
+ '[' 0 -lt 1 ']'
+ echo 'Waiting for KubeVirt virt-controller container to become ready ...'
Waiting for KubeVirt virt-controller container to become ready ...
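The readiness test traced above is a line count over kubectl's custom-columns output: each pod prints its containers' ready flags plus its name, awk keeps virt-controller rows that also contain true, and the count must reach 1. A sketch of the predicate, using the exact column spec from the trace:

    # One output line per pod: "<ready flags> <pod name>".
    # Count virt-controller pods whose containers all report ready=true.
    ready=$(cluster/kubectl.sh get pods -n kube-system \
        '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' \
        --no-headers | awk '/virt-controller/ && /true/' | wc -l)
    if [ "$ready" -lt 1 ]; then
        echo 'Waiting for KubeVirt virt-controller container to become ready ...'
        sleep 10
    fi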
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ awk '/virt-controller/ && /true/'
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ wc -l
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods)
0
+ sleep 10
++ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
++ awk '/virt-controller/ && /true/'
++ wc -l
++ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
Error from server (Timeout): the server was unable to return a response in the time allotted, but may still be processing the request (get pods)
+ '[' 0 -lt 1 ']'
+ echo 'Waiting for KubeVirt virt-controller container to become ready ...'
Waiting for KubeVirt virt-controller container to become ready ...
+ awk '/virt-controller/ && /true/'
+ kubectl get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ cluster/kubectl.sh get pods -n kube-system '-ocustom-columns=status:status.containerStatuses[*].ready,metadata:metadata.name' --no-headers
+ wc -l
cluster/ephemeral-provider-common.sh: line 67: 25382 Terminated ${KUBEVIRT_PATH}cluster/$KUBEVIRT_PROVIDER/.kubectl "$@"
0
+ sleep 10
++ make cluster-down
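The run never recovers: every poll fails with an API-server timeout, a TLS handshake timeout, or a reset connection; a kubectl invocation is eventually killed (the Terminated message from cluster/ephemeral-provider-common.sh line 67, most likely by the CI harness), and teardown begins with make cluster-down. The polling loop shows no visible retry budget; purely as a suggestion (not part of the original scripts), a bounded variant could cap both the per-call time and the total number of attempts:

    # Suggestion only: bound each kubectl call with timeout(1) and give up
    # after a fixed number of attempts instead of polling indefinitely.
    for attempt in $(seq 1 30); do
        ready=$(timeout 30 cluster/kubectl.sh get pods -n kube-system --no-headers \
            | awk '/virt-controller/ && /true/' | wc -l)
        [ "$ready" -ge 1 ] && break
        echo "attempt $attempt: virt-controller not ready yet"
        sleep 10
    done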